// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
    // Transform the 3x3 fp32 kernel into the winograd F(6x6, 3x3) domain and
    // repack it as fp16 with 8-wide channel interleaving.
    //
    // step 1: per (outch, inch) pair compute the 8x8 tile U = G * g * G^T
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix for F(6,3): one row per transformed output row
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // the three rows of the 3x3 spatial kernel for this (p, q) pair
            const float* g = (const float*)kernel + (p * inch + q) * 9;
            const float* g0 = g;
            const float* g1 = g + 3;
            const float* g2 = g + 6;

            float* utm = kernel_tm.channel(p).row(q);

            // left multiply: tmp = G * g  (8x3)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = g0[0] * ktm[i][0] + g0[1] * ktm[i][1] + g0[2] * ktm[i][2];
                tmp[i][1] = g1[0] * ktm[i][0] + g1[1] * ktm[i][1] + g1[2] * ktm[i][2];
                tmp[i][2] = g2[0] * ktm[i][0] + g2[1] * ktm[i][1] + g2[2] * ktm[i][2];
            }

            // right multiply: U = tmp * G^T  (8x8), row j / column i
            for (int j = 0; j < 8; j++)
            {
                const float* t = tmp[j];
                for (int i = 0; i < 8; i++)
                {
                    utm[j * 8 + i] = t[0] * ktm[i][0] + t[1] * ktm[i][1] + t[2] * ktm[i][2];
                }
            }
        }
    }

    // step 2: interleave and convert to fp16
    // src layout = 64-inch-outch
    // dst layout = 8b-8a-inch/8a-64-outch/8b
    // (create() divides by 8 — inch/outch are expected to be multiples of 8)
    kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)2u * 64, 64);

    for (int q = 0; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm_pack8.channel(q / 8);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    // gather coefficient k from 8 consecutive output channels
                    for (int j = 0; j < 8; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[j] = (__fp16)k00[k];
                    }
                    g00 += 8;
                }
            }
        }
    }
}
static void conv3x3s1_winograd63_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd63_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
convolution_winograd_dot_pack8_fp16sa_neon(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd63_transform_output_pack8_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 8b-8a-inch/8a-36-outch/8b
kernel_tm_pack8.create(inch / 8, 36, outch / 8, (size_t)2u * 64, 64);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int k = 0; k < 36; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const float* k00 = k0.row(p + i);
const float* k10 = k1.row(p + i);
const float* k20 = k2.row(p + i);
const float* k30 = k3.row(p + i);
const float* k40 = k4.row(p + i);
const float* k50 = k5.row(p + i);
const float* k60 = k6.row(p + i);
const float* k70 = k7.row(p + i);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00 += 8;
}
}
}
}
}
static void conv3x3s1_winograd43_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 4;
int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd43_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
convolution_winograd_dot_pack8_fp16sa_neon(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd43_transform_output_pack8_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd23_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd23 transform kernel
Mat kernel_tm(4 * 4, inch, outch);
const float ktm[4][3] = {
{1.0f, 0.0f, 0.0f},
{1.0f / 2, 1.0f / 2, 1.0f / 2},
{1.0f / 2, -1.0f / 2, 1.0f / 2},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[4][3];
for (int i = 0; i < 4; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 4; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 4; i++)
{
kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 16-inch-outch
// dst = 8b-8a-inch/8a-16-outch/8b
kernel_tm_pack8.create(inch / 8, 16, outch / 8, (size_t)2u * 64, 64);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int k = 0; k < 16; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const float* k00 = k0.row(p + i);
const float* k10 = k1.row(p + i);
const float* k20 = k2.row(p + i);
const float* k30 = k3.row(p + i);
const float* k40 = k4.row(p + i);
const float* k50 = k5.row(p + i);
const float* k60 = k6.row(p + i);
const float* k70 = k7.row(p + i);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00 += 8;
}
}
}
}
}
static void conv3x3s1_winograd23_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 2;
int h_tiles = outh / 2;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 16, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd23_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
convolution_winograd_dot_pack8_fp16sa_neon(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd23_transform_output_pack8_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0.row<__fp16>(0);
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.8h, v5.8h}, [%1] \n" // r04 r05
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.8h, v13.8h}, [%2] \n" // r14 r15
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v16.8h, v9.h[0] \n"
"fmla v30.8h, v16.8h, v10.h[0] \n"
"fmla v31.8h, v16.8h, v11.h[0] \n"
"fmla v28.8h, v17.8h, v8.h[1] \n"
"fmla v29.8h, v17.8h, v9.h[1] \n"
"fmla v30.8h, v17.8h, v10.h[1] \n"
"fmla v31.8h, v17.8h, v11.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v8.h[2] \n"
"fmla v29.8h, v18.8h, v9.h[2] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v11.h[2] \n"
"fmla v28.8h, v19.8h, v8.h[3] \n"
"fmla v29.8h, v19.8h, v9.h[3] \n"
"fmla v30.8h, v19.8h, v10.h[3] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v20.8h, v9.h[4] \n"
"fmla v30.8h, v20.8h, v10.h[4] \n"
"fmla v31.8h, v20.8h, v11.h[4] \n"
"fmla v28.8h, v21.8h, v8.h[5] \n"
"fmla v29.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v21.8h, v10.h[5] \n"
"fmla v31.8h, v21.8h, v11.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v8.h[6] \n"
"fmla v29.8h, v22.8h, v9.h[6] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v22.8h, v11.h[6] \n"
"fmla v28.8h, v23.8h, v8.h[7] \n"
"fmla v29.8h, v23.8h, v9.h[7] \n"
"fmla v30.8h, v23.8h, v10.h[7] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v16.8h, v11.h[0] \n"
"fmla v31.8h, v16.8h, v12.h[0] \n"
"fmla v28.8h, v17.8h, v9.h[1] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v17.8h, v11.h[1] \n"
"fmla v31.8h, v17.8h, v12.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v9.h[2] \n"
"fmla v29.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v18.8h, v12.h[2] \n"
"fmla v28.8h, v19.8h, v9.h[3] \n"
"fmla v29.8h, v19.8h, v10.h[3] \n"
"fmla v30.8h, v19.8h, v11.h[3] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v20.8h, v11.h[4] \n"
"fmla v31.8h, v20.8h, v12.h[4] \n"
"fmla v28.8h, v21.8h, v9.h[5] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v21.8h, v11.h[5] \n"
"fmla v31.8h, v21.8h, v12.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v9.h[6] \n"
"fmla v29.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v22.8h, v12.h[6] \n"
"fmla v28.8h, v23.8h, v9.h[7] \n"
"fmla v29.8h, v23.8h, v10.h[7] \n"
"fmla v30.8h, v23.8h, v11.h[7] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v13.h[0] \n"
"fmla v28.8h, v17.8h, v10.h[1] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v13.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v10.h[2] \n"
"fmla v29.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v13.h[2] \n"
"fmla v28.8h, v19.8h, v10.h[3] \n"
"fmla v29.8h, v19.8h, v11.h[3] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v13.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v13.h[4] \n"
"fmla v28.8h, v21.8h, v10.h[5] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v13.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v10.h[6] \n"
"fmla v29.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v13.h[6] \n"
"fmla v28.8h, v23.8h, v10.h[7] \n"
"fmla v29.8h, v23.8h, v11.h[7] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v13.h[7] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.8h, v5.8h}, [%3] \n" // r24 r25
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1] \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v30.8h, v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2] \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v16.8h, v6.h[0] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v5.h[2] \n"
"fmla v29.8h, v18.8h, v6.h[2] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v20.8h, v6.h[4] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v5.h[6] \n"
"fmla v29.8h, v22.8h, v6.h[6] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v6.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[0] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v6.h[2] \n"
"fmla v29.8h, v18.8h, v7.h[2] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3] \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v6.h[4] \n"
"fmla v29.8h, v20.8h, v7.h[4] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v6.h[6] \n"
"fmla v29.8h, v22.8h, v7.h[6] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"add %1, %1, #32 \n"
"add %2, %2, #32 \n"
"add %3, %3, #32 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmul v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"add %1, %1, #16 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"add %2, %2, #16 \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"fadd v30.8h, v30.8h, v31.8h \n"
"add %3, %3, #16 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
r0 += 16;
r1 += 16;
r2 += 16;
}
}
}
}
// Direct 3x3 stride-2 convolution, pack8 layout (8 channels interleaved per
// element), fp16 storage with fp16 arithmetic (fp16sa), aarch64 NEON inline asm.
//
// bottom_blob: input feature map, elempack=8, __fp16 data
// top_blob:    pre-sized output feature map, elempack=8, __fp16 data
// kernel:      weights pre-packed so each (p,q) row streams sequentially as
//              9 taps x 8 input lanes x 8 output lanes (the asm walks it
//              linearly and rewinds with "sub %4, %4, #1088" = 8.5 * 128 bytes)
// _bias:       per-output-channel bias (8 __fp16 per channel), may be empty
static void conv3x3s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // stride 2: each output row consumes 2*outw input elements horizontally and
    // the kernel window starts 2 rows down, so after a row we skip
    // (w - 2*outw) leftovers plus one full extra row; *8 for pack8 lanes.
    const int tailstep = (w - 2 * outw + w) * 8;

    const __fp16* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // seed the whole output channel with the bias; the asm kernels then
        // accumulate on top of the previously stored sums
        float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
        out0.fill(_bias0);

        for (int q = 0; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            // three consecutive input rows feeding the 3x3 window
            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 4 output positions per iteration; v28..v31 hold the 4 running
                // sums, v16..v23 cycle through kernel taps, v0..v15 hold input
                for (; j + 3 < outw; j += 4)
                {
                    asm volatile(
                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r04 r05 r06 r07
                        "fmla v28.8h, v16.8h, v0.h[0] \n"
                        "fmla v29.8h, v16.8h, v2.h[0] \n"
                        "fmla v30.8h, v16.8h, v4.h[0] \n"
                        "fmla v31.8h, v16.8h, v6.h[0] \n"
                        "fmla v28.8h, v17.8h, v0.h[1] \n"
                        "fmla v29.8h, v17.8h, v2.h[1] \n"
                        "fmla v30.8h, v17.8h, v4.h[1] \n"
                        "fmla v31.8h, v17.8h, v6.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v0.h[2] \n"
                        "fmla v29.8h, v18.8h, v2.h[2] \n"
                        "fmla v30.8h, v18.8h, v4.h[2] \n"
                        "fmla v31.8h, v18.8h, v6.h[2] \n"
                        "fmla v28.8h, v19.8h, v0.h[3] \n"
                        "fmla v29.8h, v19.8h, v2.h[3] \n"
                        "fmla v30.8h, v19.8h, v4.h[3] \n"
                        "fmla v31.8h, v19.8h, v6.h[3] \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v20.8h, v2.h[4] \n"
                        "fmla v30.8h, v20.8h, v4.h[4] \n"
                        "fmla v31.8h, v20.8h, v6.h[4] \n"
                        "fmla v28.8h, v21.8h, v0.h[5] \n"
                        "fmla v29.8h, v21.8h, v2.h[5] \n"
                        "fmla v30.8h, v21.8h, v4.h[5] \n"
                        "fmla v31.8h, v21.8h, v6.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v0.h[6] \n"
                        "fmla v29.8h, v22.8h, v2.h[6] \n"
                        "fmla v30.8h, v22.8h, v4.h[6] \n"
                        "fmla v31.8h, v22.8h, v6.h[6] \n"
                        "fmla v28.8h, v23.8h, v0.h[7] \n"
                        "fmla v29.8h, v23.8h, v2.h[7] \n"
                        "fmla v30.8h, v23.8h, v4.h[7] \n"
                        "fmla v31.8h, v23.8h, v6.h[7] \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v16.8h, v3.h[0] \n"
                        "fmla v30.8h, v16.8h, v5.h[0] \n"
                        "fmla v31.8h, v16.8h, v7.h[0] \n"
                        "fmla v28.8h, v17.8h, v1.h[1] \n"
                        "fmla v29.8h, v17.8h, v3.h[1] \n"
                        "fmla v30.8h, v17.8h, v5.h[1] \n"
                        "fmla v31.8h, v17.8h, v7.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v1.h[2] \n"
                        "fmla v29.8h, v18.8h, v3.h[2] \n"
                        "fmla v30.8h, v18.8h, v5.h[2] \n"
                        "fmla v31.8h, v18.8h, v7.h[2] \n"
                        "fmla v28.8h, v19.8h, v1.h[3] \n"
                        "fmla v29.8h, v19.8h, v3.h[3] \n"
                        "fmla v30.8h, v19.8h, v5.h[3] \n"
                        "fmla v31.8h, v19.8h, v7.h[3] \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v20.8h, v3.h[4] \n"
                        "fmla v30.8h, v20.8h, v5.h[4] \n"
                        "fmla v31.8h, v20.8h, v7.h[4] \n"
                        "fmla v28.8h, v21.8h, v1.h[5] \n"
                        "fmla v29.8h, v21.8h, v3.h[5] \n"
                        "fmla v30.8h, v21.8h, v5.h[5] \n"
                        "fmla v31.8h, v21.8h, v7.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v1.h[6] \n"
                        "fmla v29.8h, v22.8h, v3.h[6] \n"
                        "fmla v30.8h, v22.8h, v5.h[6] \n"
                        "fmla v31.8h, v22.8h, v7.h[6] \n"
                        "fmla v28.8h, v23.8h, v1.h[7] \n"
                        "fmla v29.8h, v23.8h, v3.h[7] \n"
                        "fmla v30.8h, v23.8h, v5.h[7] \n"
                        "fmla v31.8h, v23.8h, v7.h[7] \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.8h}, [%1] \n" // r08
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v16.8h, v4.h[0] \n"
                        "fmla v30.8h, v16.8h, v6.h[0] \n"
                        "fmla v31.8h, v16.8h, v0.h[0] \n"
                        "fmla v28.8h, v17.8h, v2.h[1] \n"
                        "fmla v29.8h, v17.8h, v4.h[1] \n"
                        "fmla v30.8h, v17.8h, v6.h[1] \n"
                        "fmla v31.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v2.h[2] \n"
                        "fmla v29.8h, v18.8h, v4.h[2] \n"
                        "fmla v30.8h, v18.8h, v6.h[2] \n"
                        "fmla v31.8h, v18.8h, v0.h[2] \n"
                        "fmla v28.8h, v19.8h, v2.h[3] \n"
                        "fmla v29.8h, v19.8h, v4.h[3] \n"
                        "fmla v30.8h, v19.8h, v6.h[3] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v20.8h, v4.h[4] \n"
                        "fmla v30.8h, v20.8h, v6.h[4] \n"
                        "fmla v31.8h, v20.8h, v0.h[4] \n"
                        "fmla v28.8h, v21.8h, v2.h[5] \n"
                        "fmla v29.8h, v21.8h, v4.h[5] \n"
                        "fmla v30.8h, v21.8h, v6.h[5] \n"
                        "fmla v31.8h, v21.8h, v0.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v2.h[6] \n"
                        "fmla v29.8h, v22.8h, v4.h[6] \n"
                        "fmla v30.8h, v22.8h, v6.h[6] \n"
                        "fmla v31.8h, v22.8h, v0.h[6] \n"
                        "fmla v28.8h, v23.8h, v2.h[7] \n"
                        "fmla v29.8h, v23.8h, v4.h[7] \n"
                        "fmla v30.8h, v23.8h, v6.h[7] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2], #64 \n" // r14 r15 r16 r17
                        "fmla v28.8h, v16.8h, v8.h[0] \n"
                        "fmla v29.8h, v16.8h, v10.h[0] \n"
                        "fmla v30.8h, v16.8h, v12.h[0] \n"
                        "fmla v31.8h, v16.8h, v14.h[0] \n"
                        "fmla v28.8h, v17.8h, v8.h[1] \n"
                        "fmla v29.8h, v17.8h, v10.h[1] \n"
                        "fmla v30.8h, v17.8h, v12.h[1] \n"
                        "fmla v31.8h, v17.8h, v14.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v8.h[2] \n"
                        "fmla v29.8h, v18.8h, v10.h[2] \n"
                        "fmla v30.8h, v18.8h, v12.h[2] \n"
                        "fmla v31.8h, v18.8h, v14.h[2] \n"
                        "fmla v28.8h, v19.8h, v8.h[3] \n"
                        "fmla v29.8h, v19.8h, v10.h[3] \n"
                        "fmla v30.8h, v19.8h, v12.h[3] \n"
                        "fmla v31.8h, v19.8h, v14.h[3] \n"
                        "fmla v28.8h, v20.8h, v8.h[4] \n"
                        "fmla v29.8h, v20.8h, v10.h[4] \n"
                        "fmla v30.8h, v20.8h, v12.h[4] \n"
                        "fmla v31.8h, v20.8h, v14.h[4] \n"
                        "fmla v28.8h, v21.8h, v8.h[5] \n"
                        "fmla v29.8h, v21.8h, v10.h[5] \n"
                        "fmla v30.8h, v21.8h, v12.h[5] \n"
                        "fmla v31.8h, v21.8h, v14.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v8.h[6] \n"
                        "fmla v29.8h, v22.8h, v10.h[6] \n"
                        "fmla v30.8h, v22.8h, v12.h[6] \n"
                        "fmla v31.8h, v22.8h, v14.h[6] \n"
                        "fmla v28.8h, v23.8h, v8.h[7] \n"
                        "fmla v29.8h, v23.8h, v10.h[7] \n"
                        "fmla v30.8h, v23.8h, v12.h[7] \n"
                        "fmla v31.8h, v23.8h, v14.h[7] \n"
                        "fmla v28.8h, v16.8h, v9.h[0] \n"
                        "fmla v29.8h, v16.8h, v11.h[0] \n"
                        "fmla v30.8h, v16.8h, v13.h[0] \n"
                        "fmla v31.8h, v16.8h, v15.h[0] \n"
                        "fmla v28.8h, v17.8h, v9.h[1] \n"
                        "fmla v29.8h, v17.8h, v11.h[1] \n"
                        "fmla v30.8h, v17.8h, v13.h[1] \n"
                        "fmla v31.8h, v17.8h, v15.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v9.h[2] \n"
                        "fmla v29.8h, v18.8h, v11.h[2] \n"
                        "fmla v30.8h, v18.8h, v13.h[2] \n"
                        "fmla v31.8h, v18.8h, v15.h[2] \n"
                        "fmla v28.8h, v19.8h, v9.h[3] \n"
                        "fmla v29.8h, v19.8h, v11.h[3] \n"
                        "fmla v30.8h, v19.8h, v13.h[3] \n"
                        "fmla v31.8h, v19.8h, v15.h[3] \n"
                        "fmla v28.8h, v20.8h, v9.h[4] \n"
                        "fmla v29.8h, v20.8h, v11.h[4] \n"
                        "fmla v30.8h, v20.8h, v13.h[4] \n"
                        "fmla v31.8h, v20.8h, v15.h[4] \n"
                        "fmla v28.8h, v21.8h, v9.h[5] \n"
                        "fmla v29.8h, v21.8h, v11.h[5] \n"
                        "fmla v30.8h, v21.8h, v13.h[5] \n"
                        "fmla v31.8h, v21.8h, v15.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v9.h[6] \n"
                        "fmla v29.8h, v22.8h, v11.h[6] \n"
                        "fmla v30.8h, v22.8h, v13.h[6] \n"
                        "fmla v31.8h, v22.8h, v15.h[6] \n"
                        "fmla v28.8h, v23.8h, v9.h[7] \n"
                        "fmla v29.8h, v23.8h, v11.h[7] \n"
                        "fmla v30.8h, v23.8h, v13.h[7] \n"
                        "fmla v31.8h, v23.8h, v15.h[7] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v8.8h}, [%2] \n" // r18
                        "fmla v28.8h, v16.8h, v10.h[0] \n"
                        "fmla v29.8h, v16.8h, v12.h[0] \n"
                        "fmla v30.8h, v16.8h, v14.h[0] \n"
                        "fmla v31.8h, v16.8h, v8.h[0] \n"
                        "fmla v28.8h, v17.8h, v10.h[1] \n"
                        "fmla v29.8h, v17.8h, v12.h[1] \n"
                        "fmla v30.8h, v17.8h, v14.h[1] \n"
                        "fmla v31.8h, v17.8h, v8.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v10.h[2] \n"
                        "fmla v29.8h, v18.8h, v12.h[2] \n"
                        "fmla v30.8h, v18.8h, v14.h[2] \n"
                        "fmla v31.8h, v18.8h, v8.h[2] \n"
                        "fmla v28.8h, v19.8h, v10.h[3] \n"
                        "fmla v29.8h, v19.8h, v12.h[3] \n"
                        "fmla v30.8h, v19.8h, v14.h[3] \n"
                        "fmla v31.8h, v19.8h, v8.h[3] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
                        "fmla v28.8h, v20.8h, v10.h[4] \n"
                        "fmla v29.8h, v20.8h, v12.h[4] \n"
                        "fmla v30.8h, v20.8h, v14.h[4] \n"
                        "fmla v31.8h, v20.8h, v8.h[4] \n"
                        "fmla v28.8h, v21.8h, v10.h[5] \n"
                        "fmla v29.8h, v21.8h, v12.h[5] \n"
                        "fmla v30.8h, v21.8h, v14.h[5] \n"
                        "fmla v31.8h, v21.8h, v8.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v10.h[6] \n"
                        "fmla v29.8h, v22.8h, v12.h[6] \n"
                        "fmla v30.8h, v22.8h, v14.h[6] \n"
                        "fmla v31.8h, v22.8h, v8.h[6] \n"
                        "fmla v28.8h, v23.8h, v10.h[7] \n"
                        "fmla v29.8h, v23.8h, v12.h[7] \n"
                        "fmla v30.8h, v23.8h, v14.h[7] \n"
                        "fmla v31.8h, v23.8h, v8.h[7] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r24 r25 r26 r27
                        "fmla v28.8h, v16.8h, v0.h[0] \n"
                        "fmla v29.8h, v16.8h, v2.h[0] \n"
                        "fmla v30.8h, v16.8h, v4.h[0] \n"
                        "fmla v31.8h, v16.8h, v6.h[0] \n"
                        "fmla v28.8h, v17.8h, v0.h[1] \n"
                        "fmla v29.8h, v17.8h, v2.h[1] \n"
                        "fmla v30.8h, v17.8h, v4.h[1] \n"
                        "fmla v31.8h, v17.8h, v6.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v0.h[2] \n"
                        "fmla v29.8h, v18.8h, v2.h[2] \n"
                        "fmla v30.8h, v18.8h, v4.h[2] \n"
                        "fmla v31.8h, v18.8h, v6.h[2] \n"
                        "fmla v28.8h, v19.8h, v0.h[3] \n"
                        "fmla v29.8h, v19.8h, v2.h[3] \n"
                        "fmla v30.8h, v19.8h, v4.h[3] \n"
                        "fmla v31.8h, v19.8h, v6.h[3] \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v20.8h, v2.h[4] \n"
                        "fmla v30.8h, v20.8h, v4.h[4] \n"
                        "fmla v31.8h, v20.8h, v6.h[4] \n"
                        "fmla v28.8h, v21.8h, v0.h[5] \n"
                        "fmla v29.8h, v21.8h, v2.h[5] \n"
                        "fmla v30.8h, v21.8h, v4.h[5] \n"
                        "fmla v31.8h, v21.8h, v6.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v0.h[6] \n"
                        "fmla v29.8h, v22.8h, v2.h[6] \n"
                        "fmla v30.8h, v22.8h, v4.h[6] \n"
                        "fmla v31.8h, v22.8h, v6.h[6] \n"
                        "fmla v28.8h, v23.8h, v0.h[7] \n"
                        "fmla v29.8h, v23.8h, v2.h[7] \n"
                        "fmla v30.8h, v23.8h, v4.h[7] \n"
                        "fmla v31.8h, v23.8h, v6.h[7] \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v16.8h, v3.h[0] \n"
                        "fmla v30.8h, v16.8h, v5.h[0] \n"
                        "fmla v31.8h, v16.8h, v7.h[0] \n"
                        "fmla v28.8h, v17.8h, v1.h[1] \n"
                        "fmla v29.8h, v17.8h, v3.h[1] \n"
                        "fmla v30.8h, v17.8h, v5.h[1] \n"
                        "fmla v31.8h, v17.8h, v7.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v1.h[2] \n"
                        "fmla v29.8h, v18.8h, v3.h[2] \n"
                        "fmla v30.8h, v18.8h, v5.h[2] \n"
                        "fmla v31.8h, v18.8h, v7.h[2] \n"
                        "fmla v28.8h, v19.8h, v1.h[3] \n"
                        "fmla v29.8h, v19.8h, v3.h[3] \n"
                        "fmla v30.8h, v19.8h, v5.h[3] \n"
                        "fmla v31.8h, v19.8h, v7.h[3] \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v20.8h, v3.h[4] \n"
                        "fmla v30.8h, v20.8h, v5.h[4] \n"
                        "fmla v31.8h, v20.8h, v7.h[4] \n"
                        "fmla v28.8h, v21.8h, v1.h[5] \n"
                        "fmla v29.8h, v21.8h, v3.h[5] \n"
                        "fmla v30.8h, v21.8h, v5.h[5] \n"
                        "fmla v31.8h, v21.8h, v7.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v1.h[6] \n"
                        "fmla v29.8h, v22.8h, v3.h[6] \n"
                        "fmla v30.8h, v22.8h, v5.h[6] \n"
                        "fmla v31.8h, v22.8h, v7.h[6] \n"
                        "fmla v28.8h, v23.8h, v1.h[7] \n"
                        "fmla v29.8h, v23.8h, v3.h[7] \n"
                        "fmla v30.8h, v23.8h, v5.h[7] \n"
                        "fmla v31.8h, v23.8h, v7.h[7] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v0.8h}, [%3] \n" // r28
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v16.8h, v4.h[0] \n"
                        "fmla v30.8h, v16.8h, v6.h[0] \n"
                        "fmla v31.8h, v16.8h, v0.h[0] \n"
                        "fmla v28.8h, v17.8h, v2.h[1] \n"
                        "fmla v29.8h, v17.8h, v4.h[1] \n"
                        "fmla v30.8h, v17.8h, v6.h[1] \n"
                        "fmla v31.8h, v17.8h, v0.h[1] \n"
                        // "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
                        "fmla v28.8h, v18.8h, v2.h[2] \n"
                        "fmla v29.8h, v18.8h, v4.h[2] \n"
                        "fmla v30.8h, v18.8h, v6.h[2] \n"
                        "fmla v31.8h, v18.8h, v0.h[2] \n"
                        "fmla v28.8h, v19.8h, v2.h[3] \n"
                        "fmla v29.8h, v19.8h, v4.h[3] \n"
                        "fmla v30.8h, v19.8h, v6.h[3] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v20.8h, v4.h[4] \n"
                        "fmla v30.8h, v20.8h, v6.h[4] \n"
                        "fmla v31.8h, v20.8h, v0.h[4] \n"
                        "fmla v28.8h, v21.8h, v2.h[5] \n"
                        "fmla v29.8h, v21.8h, v4.h[5] \n"
                        "fmla v30.8h, v21.8h, v6.h[5] \n"
                        "fmla v31.8h, v21.8h, v0.h[5] \n"
                        "fmla v28.8h, v22.8h, v2.h[6] \n"
                        "fmla v29.8h, v22.8h, v4.h[6] \n"
                        "fmla v30.8h, v22.8h, v6.h[6] \n"
                        "fmla v31.8h, v22.8h, v0.h[6] \n"
                        "fmla v28.8h, v23.8h, v2.h[7] \n"
                        "fmla v29.8h, v23.8h, v4.h[7] \n"
                        "fmla v30.8h, v23.8h, v6.h[7] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
                        "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2),      // %3
                        "=r"(kptr)     // %4
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "4"(kptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
                }
                // 2 output positions per iteration; sums split across v28/v30
                // (j) and v29/v31 (j+1), reduced with fadd before the store
                for (; j + 1 < outw; j += 2)
                {
                    asm volatile(
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "prfm pldl1keep, [%1, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0
                        "fmul v28.8h, v16.8h, v0.h[0] \n"
                        "fmul v29.8h, v16.8h, v2.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "fmla v31.8h, v17.8h, v2.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v0.h[2] \n"
                        "fmla v29.8h, v18.8h, v2.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v20.8h, v2.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v31.8h, v21.8h, v2.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v0.h[6] \n"
                        "fmla v29.8h, v22.8h, v2.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v16.8h, v3.h[0] \n"
                        "fmla v30.8h, v17.8h, v1.h[1] \n"
                        "fmla v31.8h, v17.8h, v3.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v1.h[2] \n"
                        "fmla v29.8h, v18.8h, v3.h[2] \n"
                        "fmla v30.8h, v19.8h, v1.h[3] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.8h}, [%1] \n" // r04
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v20.8h, v3.h[4] \n"
                        "fmla v30.8h, v21.8h, v1.h[5] \n"
                        "fmla v31.8h, v21.8h, v3.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v1.h[6] \n"
                        "fmla v29.8h, v22.8h, v3.h[6] \n"
                        "fmla v30.8h, v23.8h, v1.h[7] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v16.8h, v0.h[0] \n"
                        "fmla v30.8h, v17.8h, v2.h[1] \n"
                        "fmla v31.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v2.h[2] \n"
                        "fmla v29.8h, v18.8h, v0.h[2] \n"
                        "fmla v30.8h, v19.8h, v2.h[3] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r10 r11 r12 r13
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v20.8h, v0.h[4] \n"
                        "fmla v30.8h, v21.8h, v2.h[5] \n"
                        "fmla v31.8h, v21.8h, v0.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v2.h[6] \n"
                        "fmla v29.8h, v22.8h, v0.h[6] \n"
                        "fmla v30.8h, v23.8h, v2.h[7] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "fmla v28.8h, v16.8h, v4.h[0] \n"
                        "fmla v29.8h, v16.8h, v6.h[0] \n"
                        "fmla v30.8h, v17.8h, v4.h[1] \n"
                        "fmla v31.8h, v17.8h, v6.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v4.h[2] \n"
                        "fmla v29.8h, v18.8h, v6.h[2] \n"
                        "fmla v30.8h, v19.8h, v4.h[3] \n"
                        "fmla v31.8h, v19.8h, v6.h[3] \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v20.8h, v6.h[4] \n"
                        "fmla v30.8h, v21.8h, v4.h[5] \n"
                        "fmla v31.8h, v21.8h, v6.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v4.h[6] \n"
                        "fmla v29.8h, v22.8h, v6.h[6] \n"
                        "fmla v30.8h, v23.8h, v4.h[7] \n"
                        "fmla v31.8h, v23.8h, v6.h[7] \n"
                        "fmla v28.8h, v16.8h, v5.h[0] \n"
                        "fmla v29.8h, v16.8h, v7.h[0] \n"
                        "fmla v30.8h, v17.8h, v5.h[1] \n"
                        "fmla v31.8h, v17.8h, v7.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v5.h[2] \n"
                        "fmla v29.8h, v18.8h, v7.h[2] \n"
                        "fmla v30.8h, v19.8h, v5.h[3] \n"
                        "fmla v31.8h, v19.8h, v7.h[3] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v4.8h}, [%2] \n" // r14
                        "fmla v28.8h, v20.8h, v5.h[4] \n"
                        "fmla v29.8h, v20.8h, v7.h[4] \n"
                        "fmla v30.8h, v21.8h, v5.h[5] \n"
                        "fmla v31.8h, v21.8h, v7.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v5.h[6] \n"
                        "fmla v29.8h, v22.8h, v7.h[6] \n"
                        "fmla v30.8h, v23.8h, v5.h[7] \n"
                        "fmla v31.8h, v23.8h, v7.h[7] \n"
                        "fmla v28.8h, v16.8h, v6.h[0] \n"
                        "fmla v29.8h, v16.8h, v4.h[0] \n"
                        "fmla v30.8h, v17.8h, v6.h[1] \n"
                        "fmla v31.8h, v17.8h, v4.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v6.h[2] \n"
                        "fmla v29.8h, v18.8h, v4.h[2] \n"
                        "fmla v30.8h, v19.8h, v6.h[3] \n"
                        "fmla v31.8h, v19.8h, v4.h[3] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
                        "fmla v28.8h, v20.8h, v6.h[4] \n"
                        "fmla v29.8h, v20.8h, v4.h[4] \n"
                        "fmla v30.8h, v21.8h, v6.h[5] \n"
                        "fmla v31.8h, v21.8h, v4.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v6.h[6] \n"
                        "fmla v29.8h, v22.8h, v4.h[6] \n"
                        "fmla v30.8h, v23.8h, v6.h[7] \n"
                        "fmla v31.8h, v23.8h, v4.h[7] \n"
                        "fmla v28.8h, v16.8h, v0.h[0] \n"
                        "fmla v29.8h, v16.8h, v2.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "fmla v31.8h, v17.8h, v2.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v0.h[2] \n"
                        "fmla v29.8h, v18.8h, v2.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v20.8h, v2.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v31.8h, v21.8h, v2.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v0.h[6] \n"
                        "fmla v29.8h, v22.8h, v2.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v16.8h, v3.h[0] \n"
                        "fmla v30.8h, v17.8h, v1.h[1] \n"
                        "fmla v31.8h, v17.8h, v3.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v28.8h, v18.8h, v1.h[2] \n"
                        "fmla v29.8h, v18.8h, v3.h[2] \n"
                        "fmla v30.8h, v19.8h, v1.h[3] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v0.8h}, [%3] \n" // r24
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v20.8h, v3.h[4] \n"
                        "fmla v30.8h, v21.8h, v1.h[5] \n"
                        "fmla v31.8h, v21.8h, v3.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v28.8h, v22.8h, v1.h[6] \n"
                        "fmla v29.8h, v22.8h, v3.h[6] \n"
                        "fmla v30.8h, v23.8h, v1.h[7] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v16.8h, v0.h[0] \n"
                        "fmla v30.8h, v17.8h, v2.h[1] \n"
                        "fmla v31.8h, v17.8h, v0.h[1] \n"
                        // "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
                        "fmla v28.8h, v18.8h, v2.h[2] \n"
                        "fmla v29.8h, v18.8h, v0.h[2] \n"
                        "fmla v30.8h, v19.8h, v2.h[3] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v20.8h, v0.h[4] \n"
                        "fmla v30.8h, v21.8h, v2.h[5] \n"
                        "fmla v31.8h, v21.8h, v0.h[5] \n"
                        "fmla v28.8h, v22.8h, v2.h[6] \n"
                        "fmla v29.8h, v22.8h, v0.h[6] \n"
                        "fmla v30.8h, v23.8h, v2.h[7] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "fadd v28.8h, v28.8h, v30.8h \n"
                        "fadd v29.8h, v29.8h, v31.8h \n"
                        "sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
                        "st1 {v28.8h, v29.8h}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2),      // %3
                        "=r"(kptr)     // %4
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "4"(kptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
                }
                // leftover single output position; one sum spread across
                // v28..v31 partial accumulators, reduced with two fadds
                for (; j < outw; j++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "prfm pldl1keep, [%1, #384] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v31.8h}, [%0] \n" // sum0
                        "fmul v28.8h, v16.8h, v0.h[0] \n"
                        "fmul v29.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmul v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v21.8h, v0.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v17.8h, v1.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v30.8h, v18.8h, v1.h[2] \n"
                        "fmla v31.8h, v19.8h, v1.h[3] \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v21.8h, v1.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v30.8h, v22.8h, v1.h[6] \n"
                        "fmla v31.8h, v23.8h, v1.h[7] \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v17.8h, v2.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v30.8h, v18.8h, v2.h[2] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "prfm pldl1keep, [%2, #384] \n"
                        "ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v21.8h, v2.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v30.8h, v22.8h, v2.h[6] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "fmla v28.8h, v16.8h, v3.h[0] \n"
                        "fmla v29.8h, v17.8h, v3.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v30.8h, v18.8h, v3.h[2] \n"
                        "fmla v31.8h, v19.8h, v3.h[3] \n"
                        "fmla v28.8h, v20.8h, v3.h[4] \n"
                        "fmla v29.8h, v21.8h, v3.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v30.8h, v22.8h, v3.h[6] \n"
                        "fmla v31.8h, v23.8h, v3.h[7] \n"
                        "fmla v28.8h, v16.8h, v4.h[0] \n"
                        "fmla v29.8h, v17.8h, v4.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v30.8h, v18.8h, v4.h[2] \n"
                        "fmla v31.8h, v19.8h, v4.h[3] \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v21.8h, v4.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v30.8h, v22.8h, v4.h[6] \n"
                        "fmla v31.8h, v23.8h, v4.h[7] \n"
                        "fmla v28.8h, v16.8h, v5.h[0] \n"
                        "fmla v29.8h, v17.8h, v5.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v30.8h, v18.8h, v5.h[2] \n"
                        "fmla v31.8h, v19.8h, v5.h[3] \n"
                        "prfm pldl1keep, [%3, #384] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22
                        "fmla v28.8h, v20.8h, v5.h[4] \n"
                        "fmla v29.8h, v21.8h, v5.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v30.8h, v22.8h, v5.h[6] \n"
                        "fmla v31.8h, v23.8h, v5.h[7] \n"
                        "fmla v28.8h, v16.8h, v0.h[0] \n"
                        "fmla v29.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v31.8h, v19.8h, v0.h[3] \n"
                        "fmla v28.8h, v20.8h, v0.h[4] \n"
                        "fmla v29.8h, v21.8h, v0.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v31.8h, v23.8h, v0.h[7] \n"
                        "fmla v28.8h, v16.8h, v1.h[0] \n"
                        "fmla v29.8h, v17.8h, v1.h[1] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
                        "fmla v30.8h, v18.8h, v1.h[2] \n"
                        "fmla v31.8h, v19.8h, v1.h[3] \n"
                        "fmla v28.8h, v20.8h, v1.h[4] \n"
                        "fmla v29.8h, v21.8h, v1.h[5] \n"
                        "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
                        "fmla v30.8h, v22.8h, v1.h[6] \n"
                        "fmla v31.8h, v23.8h, v1.h[7] \n"
                        "fmla v28.8h, v16.8h, v2.h[0] \n"
                        "fmla v29.8h, v17.8h, v2.h[1] \n"
                        // "prfm pldl1keep, [%4, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
                        "fmla v30.8h, v18.8h, v2.h[2] \n"
                        "fmla v31.8h, v19.8h, v2.h[3] \n"
                        "fmla v28.8h, v20.8h, v2.h[4] \n"
                        "fmla v29.8h, v21.8h, v2.h[5] \n"
                        "add %1, %1, #32 \n"
                        "fmla v30.8h, v22.8h, v2.h[6] \n"
                        "fmla v31.8h, v23.8h, v2.h[7] \n"
                        "add %2, %2, #32 \n"
                        "fadd v28.8h, v28.8h, v29.8h \n"
                        "fadd v30.8h, v30.8h, v31.8h \n"
                        "add %3, %3, #32 \n"
                        "fadd v28.8h, v28.8h, v30.8h \n"
                        "sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
                        "st1 {v28.8h}, [%0], #16 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),      // %1
                        "=r"(r1),      // %2
                        "=r"(r2),      // %3
                        "=r"(kptr)     // %4
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "4"(kptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
                }

                // advance to the start of the next stride-2 input row pair
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
|
jacobi-1d.pluto_orio.seq_par.c |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
double a[T][N];
// Fill the global T x N grid so that row r holds the value r + col/N
// for every column; gives each cell a distinct, reproducible value.
void init_input_vars()
{
int r, c;
for (r = 0; r < T; r++) {
for (c = 0; c < N; c++) {
a[r][c] = r + ((double) c) / N;
}
}
}
double rtclock()
{
struct timezone tzp;
struct timeval tp;
int stat;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
// Driver: times REPS runs of a PLuTo/Orio-generated, tiled 1-D Jacobi
// sweep over the global array a[T][N] and prints the mean wall-clock
// time per run.
// NOTE(review): the outer t,i,j below are shadowed by the
// 'register int i,j,k,t' re-declarations inside the timing loop.
int main()
{
init_input_vars();
double orio_t_start=0, orio_t_end=0, orio_t_total=0;
int orio_i;
int t,i,j;
for (orio_i=0; orio_i<REPS; orio_i++)
{
orio_t_start = rtclock();
register int i,j,k,t;
register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t;
register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6,
newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12;
register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6,
newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12;
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
int c1, c2, c3, c4, c5, c6;
register int lb, ub, lb1, ub1, lb2, ub2;
/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.01s. */
// c1 walks time-tile wavefronts; within a wavefront the c2 tiles are
// independent, hence the parallel for below.
for (c1=-1;c1<=floord(33*T+N-35,1024);c1++) {
lb1=max(max(0,ceild(16*c1-511,528)),ceild(32*c1-T+1,32));
ub1=min(min(floord(16*c1+15,16),floord(32*c1+N+29,1056)),floord(T+N-3,1024));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6)
for (c2=lb1; c2<=ub1; c2++) {
/*@ begin Loop(
transform Composite(
permut = [['c5', 'c6']],
regtile = (['c5', 'c6'],[2, 8]),
scalarreplace = (False, 'double'),
vector = (True, ['ivdep','vector always']))
for (c4=max(max(0,ceild(16*c1-16*c2-63,64)),8*c2);c4<=min(min(8*c2+7,floord(32*c1-32*c2+N+29,128)),floord(T+N-3,128));c4++) {
for (c5=max(max(32*c1-32*c2,1),128*c4-N+2);c5<=min(min(128*c4+126,T-1),32*c1-32*c2+31);c5++) {
for (c6=max(c5+1,128*c4);c6<=min(128*c4+127,c5+N-2);c6++) {
a[c5][-c5+c6]=((double)(333))/1000*(a[c5-1][1+-c5+c6]+a[c5-1][-c5+c6]+a[c5-1][-c5+c6-1]) ;
}
}
}
) @*/for (c4=max(max(0,ceild(16*c1-16*c2-63,64)),8*c2); c4<=min(min(8*c2+7,floord(32*c1-32*c2+N+29,128)),floord(T+N-3,128)); c4++ ) {
// Register-tiled main body: c5 (time) unrolled by 2, c6 (space) by 8.
for (c5t=max(max(32*c1-32*c2,1),128*c4-N+2); c5t<=min(min(128*c4+126,T-1),32*c1-32*c2+31)-1; c5t=c5t+2) {
// Compute the c6 range common to both unrolled c5 iterations.
newlb_c6=-2147483648;
newub_c6=2147483647;
register int cbv_1;
cbv_1=c5t+1;
#pragma ivdep
#pragma vector always
for (c5=c5t; c5<=cbv_1; c5=c5+1) {
newlb_c6=max(newlb_c6,max(c5+1,128*c4));
newub_c6=min(newub_c6,min(128*c4+127,c5+N-2));
}
// Prologue: c6 values below the common range, per c5 row.
for (c5=c5t; c5<=c5t+1; c5=c5+1) {
register int cbv_2, cbv_3;
cbv_2=max(c5+1,128*c4);
cbv_3=newlb_c6-1;
#pragma ivdep
#pragma vector always
for (c6=cbv_2; c6<=cbv_3; c6=c6+1) {
a[c5][-c5+c6]=((double)(333))/1000*(a[c5-1][1+-c5+c6]+a[c5-1][-c5+c6]+a[c5-1][-c5+c6-1]);
}
}
register int cbv_4;
cbv_4=newub_c6-7;
#pragma ivdep
#pragma vector always
for (c6t=newlb_c6; c6t<=cbv_4; c6t=c6t+8) {
a[c5t][-c5t+c6t]=((double)(333))/1000*(a[c5t-1][1+-c5t+c6t]+a[c5t-1][-c5t+c6t]+a[c5t-1][-c5t+c6t-1]);
a[c5t][-c5t+(c6t+1)]=((double)(333))/1000*(a[c5t-1][1+-c5t+(c6t+1)]+a[c5t-1][-c5t+(c6t+1)]+a[c5t-1][-c5t+(c6t+1)-1]);
a[c5t][-c5t+(c6t+2)]=((double)(333))/1000*(a[c5t-1][1+-c5t+(c6t+2)]+a[c5t-1][-c5t+(c6t+2)]+a[c5t-1][-c5t+(c6t+2)-1]);
a[c5t][-c5t+(c6t+3)]=((double)(333))/1000*(a[c5t-1][1+-c5t+(c6t+3)]+a[c5t-1][-c5t+(c6t+3)]+a[c5t-1][-c5t+(c6t+3)-1]);
a[c5t][-c5t+(c6t+4)]=((double)(333))/1000*(a[c5t-1][1+-c5t+(c6t+4)]+a[c5t-1][-c5t+(c6t+4)]+a[c5t-1][-c5t+(c6t+4)-1]);
a[c5t][-c5t+(c6t+5)]=((double)(333))/1000*(a[c5t-1][1+-c5t+(c6t+5)]+a[c5t-1][-c5t+(c6t+5)]+a[c5t-1][-c5t+(c6t+5)-1]);
a[c5t][-c5t+(c6t+6)]=((double)(333))/1000*(a[c5t-1][1+-c5t+(c6t+6)]+a[c5t-1][-c5t+(c6t+6)]+a[c5t-1][-c5t+(c6t+6)-1]);
a[c5t][-c5t+(c6t+7)]=((double)(333))/1000*(a[c5t-1][1+-c5t+(c6t+7)]+a[c5t-1][-c5t+(c6t+7)]+a[c5t-1][-c5t+(c6t+7)-1]);
a[(c5t+1)][-(c5t+1)+c6t]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+c6t]+a[(c5t+1)-1][-(c5t+1)+c6t]+a[(c5t+1)-1][-(c5t+1)+c6t-1]);
a[(c5t+1)][-(c5t+1)+(c6t+1)]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+(c6t+1)]+a[(c5t+1)-1][-(c5t+1)+(c6t+1)]+a[(c5t+1)-1][-(c5t+1)+(c6t+1)-1]);
a[(c5t+1)][-(c5t+1)+(c6t+2)]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+(c6t+2)]+a[(c5t+1)-1][-(c5t+1)+(c6t+2)]+a[(c5t+1)-1][-(c5t+1)+(c6t+2)-1]);
a[(c5t+1)][-(c5t+1)+(c6t+3)]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+(c6t+3)]+a[(c5t+1)-1][-(c5t+1)+(c6t+3)]+a[(c5t+1)-1][-(c5t+1)+(c6t+3)-1]);
a[(c5t+1)][-(c5t+1)+(c6t+4)]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+(c6t+4)]+a[(c5t+1)-1][-(c5t+1)+(c6t+4)]+a[(c5t+1)-1][-(c5t+1)+(c6t+4)-1]);
a[(c5t+1)][-(c5t+1)+(c6t+5)]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+(c6t+5)]+a[(c5t+1)-1][-(c5t+1)+(c6t+5)]+a[(c5t+1)-1][-(c5t+1)+(c6t+5)-1]);
a[(c5t+1)][-(c5t+1)+(c6t+6)]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+(c6t+6)]+a[(c5t+1)-1][-(c5t+1)+(c6t+6)]+a[(c5t+1)-1][-(c5t+1)+(c6t+6)-1]);
a[(c5t+1)][-(c5t+1)+(c6t+7)]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+(c6t+7)]+a[(c5t+1)-1][-(c5t+1)+(c6t+7)]+a[(c5t+1)-1][-(c5t+1)+(c6t+7)-1]);
}
// c6 epilogue for the unroll-by-8 remainder, both c5 rows.
#pragma ivdep
#pragma vector always
for (c6=c6t; c6<=newub_c6; c6=c6+1) {
a[c5t][-c5t+c6]=((double)(333))/1000*(a[c5t-1][1+-c5t+c6]+a[c5t-1][-c5t+c6]+a[c5t-1][-c5t+c6-1]);
a[(c5t+1)][-(c5t+1)+c6]=((double)(333))/1000*(a[(c5t+1)-1][1+-(c5t+1)+c6]+a[(c5t+1)-1][-(c5t+1)+c6]+a[(c5t+1)-1][-(c5t+1)+c6-1]);
}
// Epilogue: c6 values above the common range, per c5 row.
for (c5=c5t; c5<=c5t+1; c5=c5+1) {
register int cbv_5, cbv_6;
cbv_5=newub_c6+1;
cbv_6=min(128*c4+127,c5+N-2);
#pragma ivdep
#pragma vector always
for (c6=cbv_5; c6<=cbv_6; c6=c6+1) {
a[c5][-c5+c6]=((double)(333))/1000*(a[c5-1][1+-c5+c6]+a[c5-1][-c5+c6]+a[c5-1][-c5+c6-1]);
}
}
}
// Cleanup for the leftover c5 row when the time extent is odd.
for (c5=c5t; c5<=min(min(128*c4+126,T-1),32*c1-32*c2+31); c5=c5+1) {
register int cbv_7, cbv_8;
cbv_7=max(c5+1,128*c4);
cbv_8=min(128*c4+127,c5+N-2)-7;
#pragma ivdep
#pragma vector always
for (c6t=cbv_7; c6t<=cbv_8; c6t=c6t+8) {
a[c5][-c5+c6t]=((double)(333))/1000*(a[c5-1][1+-c5+c6t]+a[c5-1][-c5+c6t]+a[c5-1][-c5+c6t-1]);
a[c5][-c5+(c6t+1)]=((double)(333))/1000*(a[c5-1][1+-c5+(c6t+1)]+a[c5-1][-c5+(c6t+1)]+a[c5-1][-c5+(c6t+1)-1]);
a[c5][-c5+(c6t+2)]=((double)(333))/1000*(a[c5-1][1+-c5+(c6t+2)]+a[c5-1][-c5+(c6t+2)]+a[c5-1][-c5+(c6t+2)-1]);
a[c5][-c5+(c6t+3)]=((double)(333))/1000*(a[c5-1][1+-c5+(c6t+3)]+a[c5-1][-c5+(c6t+3)]+a[c5-1][-c5+(c6t+3)-1]);
a[c5][-c5+(c6t+4)]=((double)(333))/1000*(a[c5-1][1+-c5+(c6t+4)]+a[c5-1][-c5+(c6t+4)]+a[c5-1][-c5+(c6t+4)-1]);
a[c5][-c5+(c6t+5)]=((double)(333))/1000*(a[c5-1][1+-c5+(c6t+5)]+a[c5-1][-c5+(c6t+5)]+a[c5-1][-c5+(c6t+5)-1]);
a[c5][-c5+(c6t+6)]=((double)(333))/1000*(a[c5-1][1+-c5+(c6t+6)]+a[c5-1][-c5+(c6t+6)]+a[c5-1][-c5+(c6t+6)-1]);
a[c5][-c5+(c6t+7)]=((double)(333))/1000*(a[c5-1][1+-c5+(c6t+7)]+a[c5-1][-c5+(c6t+7)]+a[c5-1][-c5+(c6t+7)-1]);
}
register int cbv_9;
cbv_9=min(128*c4+127,c5+N-2);
#pragma ivdep
#pragma vector always
for (c6=c6t; c6<=cbv_9; c6=c6+1) {
a[c5][-c5+c6]=((double)(333))/1000*(a[c5-1][1+-c5+c6]+a[c5-1][-c5+c6]+a[c5-1][-c5+c6-1]);
}
}
}
/*@ end @*/
}
}
/* End of CLooG code */
orio_t_end = rtclock();
orio_t_total += orio_t_end - orio_t_start;
}
orio_t_total = orio_t_total / REPS;
printf("%f\n", orio_t_total);
// Truncates a double to the int exit status; presumably kept so the
// computation stays live and is not dead-code-eliminated — TODO confirm.
return a[0][0];
}
|
GB_binop__bshift_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int16)
// C=scalar+B GB (_bind1st__bshift_int16)
// C=scalar+B' GB (_bind1st_tran__bshift_int16)
// C=A+scalar GB (_bind2nd__bshift_int16)
// C=A'+scalar GB (_bind2nd_tran__bshift_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int16 (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_int16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT16 || GxB_NO_BSHIFT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled slot: BSHIFT is not an accumulable op, so this generated
// variant is compiled out; the (none) name marks the unused entry.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, B are all dense; the loop body comes from the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; work is pre-sliced into
// B_ntasks tasks (B_ek_slicing) for the included template.
GrB_Info GB (_Cdense_accumB__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed type-erased via
// p_bwork, unpacked here as int8_t).
GrB_Info GB (_Cdense_accumb__bshift_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the braced block above already returned; harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled slot: no colscale (A*D) variant is generated for BSHIFT.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled slot: no rowscale (D*B) variant is generated for BSHIFT.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked); the C_to_* maps and TaskList
// describe the precomputed union pattern consumed by the add template.
// The three GB_WERK workspaces are allocated/used inside the template
// and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__bshift_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked), general sparsity case;
// delegates entirely to the emult_01 meta template.
GrB_Info GB (_AemultB_01__bshift_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// BSHIFT is non-commutative (GB_BINOP_FLIP is 1 for this file), so the
// flipxy flag selects which argument order the template applies.
GrB_Info GB (_AemultB_02__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B when M is sparse/hyper and both A and B are
// bitmap/full; delegates to the emult_03 template.
GrB_Info GB (_AemultB_03__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held as a bitmap, with an optional
// (possibly complemented) mask; delegates to the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__bshift_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = bitshift (x, Bx [k]) for every entry present in B, applied
// in parallel over the bnz entries; Bb is B's bitmap (may be NULL).
GrB_Info GB (_bind1st__bshift_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t x = (*((int16_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
if (GBB (Bb, k))
{
int8_t bij = GBX (Bx, k, false) ;
Cx [k] = GB_bitshift_int16 (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = bitshift (Ax [k], y) for every entry present in A, applied
// in parallel over the anz entries; Ab is A's bitmap (may be NULL).
GrB_Info GB (_bind2nd__bshift_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
const int8_t y = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (GBB (Ab, k))
{
int16_t aij = GBX (Ax, k, false) ;
Cx [k] = GB_bitshift_int16 (aij, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine the per-entry cast used by GB_unop_transpose.c so each
// transposed entry computes cij = bitshift (x, aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int16 (x, aij) ; \
}
// C = op (x, A'): transpose A while applying the bound-first operator.
GrB_Info GB (_bind1st_tran__bshift_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine the per-entry cast used by GB_unop_transpose.c so each
// transposed entry computes cij = bitshift (aij, y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int16 (aij, y) ; \
}
// C = op (A', y): transpose A while applying the bound-second operator.
GrB_Info GB (_bind2nd_tran__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LAGraph_BF_full.c | //------------------------------------------------------------------------------
// LAGraph_BF_full.c: Bellman-Ford single-source shortest paths, returns tree
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
// Contributed by Jinhao Chen and Timothy A. Davis, Texas A&M University
//------------------------------------------------------------------------------
// LAGraph_BF_full: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree.
// LAGraph_BF_full performs a Bellman-Ford to find out shortest path, parent
// nodes along the path and the hops (number of edges) in the path from given
// source vertex s in the range of [0, n) on graph given as matrix A with size
// n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i
// to vertex j with weight w, then A(i, j) = w. Furthermore, LAGraph_BF_full
// requires A(i, i) = 0 for all 0 <= i < n.
// LAGraph_BF_full returns GrB_SUCCESS if successful, and GrB_NO_VALUE if it
// detects the existence of negative- weight cycle. The GrB_Vector d(k), pi(k)
// and h(k) (i.e., *pd_output, *ppi_output and *ph_output respectively) will
// be NULL when negative-weight cycle detected. Otherwise, the vector d has
// d(k) as the shortest distance from s to k. pi(k) = p+1, where p is the
// parent node of k-th node in the shortest path. In particular, pi(s) = 0.
// h(k) = hop(s, k), the number of edges from s to k in the shortest path.
//------------------------------------------------------------------------------
#define LG_FREE_ALL \
{ \
GrB_free(&d); \
GrB_free(&dtmp); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_EQ_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGraph_Free ((void**)&I, NULL); \
LAGraph_Free ((void**)&J, NULL); \
LAGraph_Free ((void**)&w, NULL); \
LAGraph_Free ((void**)&W, NULL); \
LAGraph_Free ((void**)&h, NULL); \
LAGraph_Free ((void**)&pi, NULL); \
}
#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h> // from src/utility
typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;
//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
double w; // w corresponds to a path weight.
GrB_Index h; // h corresponds to a path size or number of hops.
GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
// vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
// for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
// z = min(x, y) under the lexicographic order on (w, h, pi): smallest
// path weight wins, ties broken by fewest hops, then smallest parent id.
// Any comparison involving NaN weights falls through to y, matching the
// original short-circuit chain.
void BF_lMIN
(
BF_Tuple3_struct *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
bool x_wins = (x->w < y->w) ;
if (x->w == y->w)
{
x_wins = (x->h < y->h) || (x->h == y->h && x->pi < y->pi) ;
}
if (x_wins)
{
if (z != x) { *z = *x; }
}
else
{
*z = *y;
}
}
// z = x (+) y for the lMIN_PLUSrhs semiring: weights and hop counts add
// component-wise; the recorded parent is taken from y when y carries one
// (y->pi != 0) and x is reachable (x->pi != UINT64_MAX), otherwise it is
// inherited from x.
void BF_PLUSrhs
(
BF_Tuple3_struct *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
z->w = x->w + y->w ;
z->h = x->h + y->h ;
if (x->pi != UINT64_MAX && y->pi != 0)
{
z->pi = y->pi ;
}
else
{
z->pi = x->pi ;
}
}
// z = (x == y): true only when all three tuple components match.
void BF_EQ
(
bool *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
bool same = (x->w == y->w) ;
same = same && (x->h == y->h) ;
same = same && (x->pi == y->pi) ;
(*z) = same ;
}
// Given a n-by-n adjacency matrix A and a source vertex s.
// If there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and parents along the paths as vector d. Otherwise,
// returns d=NULL if there is a negtive-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has zeros on diagonal and weights on corresponding entries of edges
// s is given index for source vertex
// Bellman-Ford single-source shortest paths over a user-defined
// (weight, hops, parent) tuple semiring. On success fills *pd_output,
// *ppi_output, *ph_output; returns GrB_NO_VALUE when a negative-weight
// cycle is reachable from s (outputs left NULL). All intermediate
// objects are released via LG_FREE_ALL on every exit path.
GrB_Info LAGraph_BF_full
(
GrB_Vector *pd_output, //the pointer to the vector of distance
GrB_Vector *ppi_output, //the pointer to the vector of parent
GrB_Vector *ph_output, //the pointer to the vector of hops
const GrB_Matrix A, //matrix for the graph
const GrB_Index s //given index of the source
)
{
GrB_Info info;
char *msg = NULL ;
// tmp vector to store distance vector after n (i.e., V) loops
GrB_Vector d = NULL, dtmp = NULL;
GrB_Matrix Atmp = NULL;
GrB_Type BF_Tuple3;
GrB_BinaryOp BF_lMIN_Tuple3;
GrB_BinaryOp BF_PLUSrhs_Tuple3;
GrB_BinaryOp BF_EQ_Tuple3;
GrB_Monoid BF_lMIN_Tuple3_Monoid;
GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
GrB_Index nrows, ncols, n, nz;  // n = # of row/col, nz = # of nnz in graph
GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
GrB_Index *h = NULL, *pi = NULL;
double *w = NULL;
BF_Tuple3_struct *W = NULL;
LG_ASSERT (A != NULL && pd_output != NULL &&
ppi_output != NULL && ph_output != NULL, GrB_NULL_POINTER) ;
*pd_output  = NULL;
*ppi_output = NULL;
*ph_output  = NULL;
GRB_TRY (GrB_Matrix_nrows (&nrows, A)) ;
GRB_TRY (GrB_Matrix_ncols (&ncols, A)) ;
GRB_TRY (GrB_Matrix_nvals (&nz, A));
LG_ASSERT_MSG (nrows == ncols, -1002, "A must be square") ;
n = nrows;
LG_ASSERT_MSG (s < n, GrB_INVALID_INDEX, "invalid source node") ;
//--------------------------------------------------------------------------
// create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
//--------------------------------------------------------------------------
// GrB_Type
GRB_TRY (GrB_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct)));
// GrB_BinaryOp
GRB_TRY (GrB_BinaryOp_new(&BF_EQ_Tuple3,
(LAGraph_binary_function) (&BF_EQ), GrB_BOOL, BF_Tuple3, BF_Tuple3));
GRB_TRY (GrB_BinaryOp_new(&BF_lMIN_Tuple3,
(LAGraph_binary_function) (&BF_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3));
GRB_TRY (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3,
(LAGraph_binary_function)(&BF_PLUSrhs),
BF_Tuple3, BF_Tuple3, BF_Tuple3));
// GrB_Monoid
// identity is the "unreachable" tuple <inf, inf, inf>
BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY,
.h = UINT64_MAX, .pi = UINT64_MAX };
GRB_TRY (GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
&BF_identity));
//GrB_Semiring
GRB_TRY (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3));
//--------------------------------------------------------------------------
// allocate arrays used for tuplets
//--------------------------------------------------------------------------
LAGRAPH_TRY (LAGraph_Malloc ((void **) &I, nz, sizeof(GrB_Index), msg)) ;
LAGRAPH_TRY (LAGraph_Malloc ((void **) &J, nz, sizeof(GrB_Index), msg)) ;
LAGRAPH_TRY (LAGraph_Malloc ((void **) &w, nz, sizeof(double), msg)) ;
LAGRAPH_TRY (LAGraph_Malloc ((void **) &W, nz, sizeof(BF_Tuple3_struct),
msg)) ;
//--------------------------------------------------------------------------
// create matrix Atmp based on A, while its entries become BF_Tuple3 type
//--------------------------------------------------------------------------
GRB_TRY (GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
int nthreads, nthreads_outer, nthreads_inner ;
LG_TRY (LAGraph_GetNumThreads (&nthreads_outer, &nthreads_inner, msg)) ;
nthreads = nthreads_outer * nthreads_inner ;
// NOTE(review): debug print left in library code — consider removing.
printf ("nthreads %d\n", nthreads) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index k = 0; k < nz; k++)
{
if (w[k] == 0)          //diagonal entries
{
W[k] = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
}
else
{
// edge (I[k], J[k]): one hop, parent recorded 1-based as I[k]+1
W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
}
}
GRB_TRY (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n));
GRB_TRY (GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
//--------------------------------------------------------------------------
// create and initialize "distance" vector d
//--------------------------------------------------------------------------
GRB_TRY (GrB_Vector_new(&d, BF_Tuple3, n));
// initial distance from s to itself
BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
GRB_TRY (GrB_Vector_setElement_UDT(d, &d0, s));
//--------------------------------------------------------------------------
// start the Bellman Ford process
//--------------------------------------------------------------------------
// copy d to dtmp in order to create a same size of vector
GRB_TRY (GrB_Vector_dup(&dtmp, d));
bool same= false;          // variable indicating if d == dtmp
int64_t iter = 0;          // number of iterations
// terminate when no new path is found or more than V-1 loops
// (iter is promoted to unsigned in iter < n-1; safe since iter >= 0)
while (!same && iter < n - 1)
{
// execute semiring on d and A, and save the result to dtmp
GRB_TRY (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
d, Atmp, GrB_NULL));
LG_TRY (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL));
if (!same)
{
// d improved: swap d and dtmp so d always holds the best distances
GrB_Vector ttmp = dtmp;
dtmp = d;
d = ttmp;
}
iter ++;
}
// check for negative-weight cycle only when there was a new path in the
// last loop, otherwise, there can't be a negative-weight cycle.
if (!same)
{
// execute semiring again to check for negative-weight cycle
GRB_TRY (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
d, Atmp, GrB_NULL));
// if d != dtmp, then there is a negative-weight cycle in the graph
LG_TRY (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL));
if (!same)
{
// printf("A negative-weight cycle found. \n");
LG_FREE_ALL;
return (GrB_NO_VALUE) ;
}
}
//--------------------------------------------------------------------------
// extract tuple from "distance" vector d and create GrB_Vectors for output
//--------------------------------------------------------------------------
// NOTE(review): I, w, W were sized for the matrix's nz entries and are
// reused here for d's entries (nz is overwritten with d's nvals);
// presumably d never has more entries than the original nz — confirm.
GRB_TRY (GrB_Vector_extractTuples_UDT (I, (void *) W, &nz, d));
LAGRAPH_TRY (LAGraph_Malloc ((void **) &h , nz, sizeof(GrB_Index), msg)) ;
LAGRAPH_TRY (LAGraph_Malloc ((void **) &pi, nz, sizeof(GrB_Index), msg)) ;
// unpack the tuple fields into the three parallel output arrays
for (GrB_Index k = 0; k < nz; k++)
{
w [k] = W[k].w ;
h [k] = W[k].h ;
pi[k] = W[k].pi;
}
GRB_TRY (GrB_Vector_new(pd_output,  GrB_FP64,   n));
GRB_TRY (GrB_Vector_new(ppi_output, GrB_UINT64, n));
GRB_TRY (GrB_Vector_new(ph_output,  GrB_UINT64, n));
GRB_TRY (GrB_Vector_build_FP64  (*pd_output , I, w , nz,GrB_MIN_FP64  ));
GRB_TRY (GrB_Vector_build_UINT64(*ppi_output, I, pi, nz,GrB_MIN_UINT64));
GRB_TRY (GrB_Vector_build_UINT64(*ph_output , I, h , nz,GrB_MIN_UINT64));
LG_FREE_ALL;
return (GrB_SUCCESS) ;
}
|
pvm-OpenMP-columnas.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Mat-vec multiply v2 = m * v1 with OpenMP, timing the multiply phase.
// Fixes: argc is validated BEFORE argv[1] is read (the old order
// dereferenced argv[1] first and crashed when run with no arguments);
// 'main' has an explicit int return type (implicit int is invalid since
// C99); the parallel inner loop now accumulates into a reduction
// variable instead of racing on v2[i] across threads (the old 'suma'
// was declared but never used); and main returns 0 on success.
// NOTE(review): the !_OPENMP fallback above defines only
// omp_get_thread_num; omp_get_wtime also needs a stub to build without
// OpenMP — confirm the intended build flags.
int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "Faltan argumentos\n");
        exit(-1);
    }
    int N = atoi(argv[1]);
    int i, j;
    int m[N][N];
    int v1[N], v2[N];
    double start, end, elapsed;
    // Inicializamos
    for (i = 0; i < N; i++) {
        v1[i] = i;
        v2[i] = 0;
        for (j = 0; j < N; j++)
            m[i][j] = i + j;
    }
    start = omp_get_wtime();
    // Multiplicamos: one row per outer iteration; the column loop runs in
    // parallel and is reduced into 'suma', so there is no write race.
    for (i = 0; i < N; ++i) {
        int suma = 0;
        #pragma omp parallel for reduction(+:suma)
        for (j = 0; j < N; ++j)
            suma += m[i][j] * v1[j];
        v2[i] = suma;
    }
    end = omp_get_wtime();
    elapsed = end - start;
    // Imprimimos
    printf("Vector Resultante\n");
    for (i = 0; i < N; i++)
        printf("v2[%d] = %d\n", i, v2[i]);
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\n", elapsed, N);
    return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 * NOTE: *y is used as scratch space and may be modified on return
 * (same contract as the classic glibc manual example this follows).
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y when x has fewer microseconds,
     * so the usec subtraction below cannot go negative. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds (more than one second) into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec of the result is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  /* Order-2 3D 25-point stencil driver (PLUTO/Pochoir-derived):
   * allocates two time levels of the field A plus the coefficient array
   * roc2, runs the tiled, time-skewed CLooG loop nest TESTS times, and
   * reports the wall-clock time of each run.
   * Fixes vs. original: Nx/Ny/Nz/Nt were left uninitialized when fewer
   * than 4 command-line arguments were given, then used for allocation
   * (UB); the initial one-element malloc for roc2 was leaked by the
   * immediate pointer overwrite; the index-0 boundary planes were read
   * by the stencil but never written (now zeroed via calloc, without
   * disturbing the rand() sequence); A and tile_size were never freed. */
  int i, j, k, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc <= 4) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+8;   /* +8 = two 4-point halo layers per side */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);
  /* A[t%2][z][y][x]: two time levels; roc2[z][y][x]: wave coefficients. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for (j = 0; j < Ny; j++) {
      /* calloc keeps the never-initialized boundary entries at 0.0 */
      A[0][i][j] = (double*) calloc(Nx, sizeof(double));
      A[1][i][j] = (double*) calloc(Nx, sizeof(double));
      roc2[i][j] = (double*) calloc(Nx, sizeof(double));
    }
  }
  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 32;
  tile_size[3] = 2048;
  tile_size[4] = -1;
  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;
  /* Initialize the interior with a fixed-seed pseudo-random pattern.
   * Loop bounds start at 1 exactly as before so the rand() sequence
   * (and thus the computed values) is unchanged. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* 25-point stencil coefficients (order-4 in each axis direction) */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    /* Tiled, time-skewed sweep generated by PLUTO/CLooG.  (The glibc
     * license boilerplate a source-to-source pass had pasted here was
     * removed; it documented nothing about this code.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=2*Nt-2;t1++) {
    lbp=ceild(t1+2,2);
    ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1-12,16),ceild(4*t2-Nz-19,32));t3<=min(min(floord(4*Nt+Ny-9,32),floord(2*t1+Ny-3,32)),floord(4*t2+Ny-9,32));t3++) {
        for (t4=max(max(ceild(t1-1020,1024),ceild(4*t2-Nz-2035,2048)),ceild(32*t3-Ny-2035,2048));t4<=min(min(min(floord(4*Nt+Nx-9,2048),floord(2*t1+Nx-3,2048)),floord(4*t2+Nx-9,2048)),floord(32*t3+Nx+19,2048));t4++) {
          for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
            for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
                lbv=max(2048*t4,4*t5+4);
                ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free allocated arrays */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);          /* was leaked in the original */
  free(tile_size);  /* was leaked in the original */
  return 0;
}
|
quansimbench-sharemem.c | /////////////////////////////////////////////////////////////////////////////
// Quantum Factorization Simulation as a Benchmark for HPC
// Shared memory version
// Verifies that the area under the peaks of the Quantum Fourier Transform
// of delta(2^x mod n,1) is larger than 1/2, where n=p*q is an
// integer that satisfies n^2<=2^QUBITS<2n^2 and maximizes the period r of 2^x mod n with r even and 2^(r/2)~=-1 mod n.
// It is a simplification of Shor's factorization algorithm
// (c) Santiago Ignacio Betelu, Denton 2018
// Thanks Datavortex Technologies, UNT/HPC, TACC and LANL for providing the hardware and research support for developing this benchmark.
// gcc -Ofast quansimbench-sharemem.c -o quansimbench -lm -Wall -fopenmp
// export OMP_NUM_THREADS=32
// ./quansimbench
// _______ ______ _ ______ _
// (_______) / _____|_) (____ \ | |
// _ _ _ _ _____ ____ ( (____ _ ____ ____) )_____ ____ ____| |__
// | | | || | | (____ | _ \ \____ \| | \| __ (| ___ | _ \ / ___) _ |
// | |__| || |_| / ___ | | | |_____) ) | | | | |__) ) ____| | | ( (___| | | |
// \______)____/\_____|_| |_(______/|_|_|_|_|______/|_____)_| |_|\____)_| |_|
//
////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <stdint.h>
#include <time.h>
#include <omp.h>
#define MINQUBITS 9
#define MAXQUBITS 60
complex float *c=NULL; // quantum amplitudes
int64_t QUBITS,N;
/////////////////////////////////////////////////////////////////////////////
// Quantum numstates addressing example with 4 nodes.
// 1- The QUBITS-NODEBITS least significant bits can be swapped within each node.
// 2- The NODEBITS most significant digits is node number
// NODE
// | Local bits
// c0 00 000 c16 10 000
// c1 00 001 c17 10 001
// c2 00 010 c18 10 010
// c3 00 011 N0 c19 10 011 N2
// c4 00 100 c20 10 100
// c5 00 101 c21 10 101
// c6 00 110 c22 10 110
// c7 00 111 c23 10 111
// ...... ......
// c8 01 000 c24 11 000
// c9 01 001 c25 11 001
// c10 01 010 c26 11 010
// c11 01 011 N1 c27 11 011 N3
// c12 01 100 c28 11 100
// c13 01 101 c29 11 101
// c14 01 110 c30 11 110
// c15 01 111 c31 11 111
// ...... ......
//////////////////////////////////////////////////////////////////////////////
// H= | 1 1 |
// | 1 -1 | /sqrt(2)
static void H(int64_t qubit){ // Hadamard gate acting on qubit
// Applies H to every amplitude pair (x, x with the target bit set) of the
// global state c[0..N-1].  q enumerates the N/2 pair indices (all bit
// patterns with the target bit deleted); the masks re-insert a 0 bit at
// position `qubit` to rebuild the full index x.
int64_t x,y,mask1,mask2,q;
complex float aux;
mask1= (0xFFFFFFFFFFFFFFFFll<<qubit); // to avoid branching and half of memory accesses
mask2= ~mask1; // selects the bits of q below position `qubit`
mask1= (mask1<<1); // selects the bits of q at/above position `qubit` (shifted up by one)
#pragma omp parallel for private(x,y,aux)
for(q=0;q<N/2;q++){
x= ((q<<1)&mask1) | (q&mask2); // 64 bit index with 0 on the qubit'th position
y= x|(1ll<<qubit); // index with 1 on the qubit'th position
// 2-point butterfly: (c[x], c[y]) <- (c[x]+c[y], c[x]-c[y]) / sqrt(2)
aux= (c[x]-c[y])*M_SQRT1_2;
c[x]= (c[x]+c[y])*M_SQRT1_2;
c[y]=aux;
}
return;
}
//////////////////////////////////////////////////////////////////////////////
static void SWAP(int64_t qubit1, int64_t qubit2){ // SWAP between qubit1 and qubit2, qubit1!=quibit2
// Exchanges the roles of two qubits in the global state c[0..N-1]:
// amplitudes whose indices differ only by flipping both bits are swapped.
complex float tmpamp;
int64_t idx,partner,bit1,bit2,sorted_lo;
// order the qubits so qubit1 < qubit2 (XOR below is symmetric either way)
if(qubit1>qubit2){
sorted_lo=qubit1;
qubit1=qubit2;
qubit2=sorted_lo;
}
#pragma omp parallel for private(partner,bit1,bit2,tmpamp)
for(idx=0;idx<N;idx++){
partner= (idx^(1ll<<qubit1))^(1ll<<qubit2); // index with both bits flipped
// only act from the smaller index of each pair, so each swap happens once
if(partner>idx){
bit1= (idx>>qubit1)&1ll;
bit2= (idx>>qubit2)&1ll;
if(bit1!=bit2){ // equal bits are fixed points of the SWAP
tmpamp= c[idx];
c[idx]=c[partner];
c[partner]=tmpamp;
}
}
}
return;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
static void init_expphase(int64_t nq,complex float *expphase){ // initialize the phase exponentials
float phase;
int64_t k;
for(k=1;k<=nq;k++){
phase= M_PI*powf(2.0,-(float)k);
expphase[k]= cexpf(I*phase);
}
}
static void CPN(int64_t qubit1, int64_t nq, complex float *expphase){ // PHASE between control qubit1 and qubit+1,2,3,..nq, phase= pi/2^1, pi/2^2,...
// Fuses nq controlled-phase gates into one sweep over the global state c:
// for each k = 1..nq, a phase exp(i*pi/2^k) is applied to every amplitude
// whose control bit (qubit1) AND target bit (qubit1-k) are both 1.
// expphase[] must have been filled by init_expphase().
int64_t x,b1,b2,k,qubit2;
#pragma omp parallel for private(x,b1,b2,k,qubit2)
for(x=0;x<N;x++){
b1= ((x>>qubit1)&1ll); // control bit
if( b1 == 1 ){
for(k=1;k<=nq;k++){
qubit2=qubit1-k; // target qubit, k positions below the control
if(qubit2>=0){ // skip targets that fall off the register
b2= ((x>>qubit2)&1ll);
if( b2==1 ) c[x]=c[x]*expphase[k]; // both bits set: multiply in the phase
}
}
}
}
return;
}
//////////////////////////////////////////////////////////////////////////////
// Return the smaller of two signed 64-bit integers.
static int64_t min(int64_t x, int64_t y){
return (x < y) ? x : y;
}
// (a^b) mod n
// Modular exponentiation: returns (a^b) mod n by binary (square-and-multiply)
// exponentiation.  Intermediate products stay within int64_t provided
// n*n does not overflow (n below ~3e9), which the callers guarantee.
static int64_t powmod(int64_t a, int64_t b, int64_t n){
int64_t result=1,base=a;
for(; b>0; b>>=1){
if(b&1ll) result=(result*base)%n; // fold in this bit's power of a
base=(base*base)%n; // square the base each round
}
return(result%n); // final %n also maps the b==0 case correctly for n==1
}
//////////////////////////////////////////////////////////////////////////////
// Benchmark driver: for each register size QUBITS, prepares the state
// |x, 2^x mod n> collapsed to outcome 1, runs an approximate QFT, and
// checks that the probability mass under the predicted spectral peaks
// exceeds 1/2 (the "Pass" column).
int main(int argc, char **argv){
int64_t nphase,max_nphase,n,mulperiod,peaknumber,x,l,q,numgates,npeaks,predictedx,threads=1;
struct timespec tim0,tim1;
double timeperstate,timeqft,prob,peakspacing,s; // don't change to float
char texfactors[32];
complex float *expphase=NULL;
// largest integers that can be factored with Shor's algoritm with register size 'qubits'
// n[qubits]= factor1[qubits]*factor2[qubits] 2^qubits <= n^2 < 2^{qubits+1}, qubits>=9
int64_t factor1[61]={0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 5, 7, 5, 11, 11, 5, 19, 23, 19, 23, 29, 47, 29, 29, 47, 71, 83, 79, 103, 149, 101, 149, 269, 167, 479, 479, 367, 859, 563, 1039, 947, 1307, 2027, 2039, 2357, 2237, 3917, 4127, 4813, 6173, 6029, 7243, 10357, 12757, 11399, 19427, 20771, 24847, 27779};
int64_t factor2[61]={0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 13, 11, 11, 23, 13, 23, 71, 23, 29, 53, 61, 67, 61, 139, 199, 173, 163, 197, 293, 317, 311, 647, 619, 487, 1109, 547, 773, 1427, 863, 1861, 1427, 2213, 2269, 2069, 2909, 3559, 5303, 4283, 5749, 6971, 7687, 11131, 13103, 12959, 14879, 23549, 19541, 25847, 30557, 38653};
#if defined(_OPENMP)
threads= omp_get_max_threads();
printf("OpenMP threads: %ld\n\n", threads);
#else
threads=1;
printf("Not using OpenMP\n\n");
#endif
printf("Qubits Factors Probability Time Coeffs/s Coeffs/s/thread Pass\n");
// pre-initialize the phase exponentials (enough entries for the largest register)
max_nphase= 2+log2(1.0*MAXQUBITS);
// NOTE(review): malloc/realloc results are not checked below; a failure
// at large QUBITS (state vector is 2^QUBITS complex floats) would crash.
expphase= malloc(max_nphase*sizeof(complex float));
init_expphase(max_nphase,expphase);
// iterate over number of qubits
for(QUBITS=MINQUBITS; QUBITS<=MAXQUBITS; QUBITS++){
N= (1ll<<QUBITS); // state vector size
c= realloc(c, N*sizeof(complex float));
n= factor1[QUBITS]*factor2[QUBITS]; // number to factor
mulperiod= (factor1[QUBITS]-1)*(factor2[QUBITS]-1); // Euler totient function is multiple of period of (2^x mod n)
peakspacing= 1.0*N/mulperiod; // so the space between peaks in the spectrum is a multiple of this
if(n*n>=N){ // n^2<N for Shor's algorithm validity
printf("Error n*n>=N\n");
exit(1);
}
// initial state is | z, 2^z mod n > collapsed by a measurement of second register with outcome 1
s=0.0; // for normalization
#pragma omp parallel for private(l,x) reduction(+:s)
for(x=0; x<N; x++){
c[x]=0.0;
l= powmod(2,x,n);
if(l==1){
c[x]=1.0;
s=s+ 1.0;
}
}
s=1.0/sqrt(s);
#pragma omp parallel for
for(x=0;x<N;x++) c[x]= c[x]*s; // normalize initial condition
nphase= 1 + (int64_t)log2(1.0*QUBITS); // number of phases in each step of Approximate Quantum Fourier Transform
clock_gettime(CLOCK_REALTIME,&tim0); // only time AQFT
// the Approximate Quantum Fourier Transform: Hadamard plus fused
// controlled phases on each qubit, then bit-reversal via SWAPs
numgates=0;
for(q=QUBITS-1;q>=0; q--){
H(q);
CPN(q,nphase,expphase); // all nphase phases folded into a single call
numgates=numgates+1+min(q,nphase);
}
for(q=0;q<QUBITS/2;q++){
SWAP(q,QUBITS-q-1);
numgates=numgates+1;
}
// end AQFT
clock_gettime(CLOCK_REALTIME,&tim1);
timeqft= 1.0*(tim1.tv_sec-tim0.tv_sec)+1.e-9*(tim1.tv_nsec-tim0.tv_nsec); // time of QFT in seconds
timeperstate= (N*numgates)/timeqft; // amplitudes processed per second
// compute probability that the solution is a multiple of peakspacing
prob=0.0;
npeaks= mulperiod;
for(peaknumber=0 ; peaknumber<=npeaks; peaknumber++){ // note that this lists << N peaks
if(peaknumber>0) {
predictedx= peaknumber*peakspacing +0.5; // state number x where a peak may occur, add 0.5 to round to nearest
if(predictedx>=0 && predictedx<N) prob=prob+creal(c[predictedx]*conjf(c[predictedx])); // resulting area under theoretical peaknumber
}
}
sprintf(texfactors,"%ld*%ld ",factor1[QUBITS], factor2[QUBITS]);
printf(" %ld %12s %13.6f %10.4e %10.4e %10.4e %4s\n", QUBITS, texfactors, prob, timeqft, timeperstate, timeperstate/threads, prob > 0.5 ? "yes" : "no");
fflush(stdout);
}
free(expphase);
free(c);
}
////////////////////////////////////////////////////////////////////////////////
|
GB_unaryop__abs_bool_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_int32
// op(A') function: GB_tran__abs_bool_int32
// C type: bool
// A type: int32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p] for all anz entries, in parallel.
// (Auto-generated file: the operator and types are baked in via the
// GB_* macros defined above.)
GrB_Info GB_unop__abs_bool_int32
(
bool *restrict Cx, // output array, length anz
const int32_t *restrict Ax, // input array, length anz
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ; // operator disabled at compile time; use generic path
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ; // Cx [p] = op (cast (Ax [p]))
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply the unary operator.
// The actual work is done by the shared template GB_unaryop_transpose.c,
// specialized through the GB_* macros defined above.
GrB_Info GB_tran__abs_bool_int32
(
GrB_Matrix C, // output matrix
const GrB_Matrix A, // input matrix, transposed on the fly
int64_t *restrict *Rowcounts, // per-thread row counts from phase 1
GBI_single_iterator Iter, // iterator over the vectors of A
const int64_t *restrict A_slice, // how A is partitioned across threads
int naslice // number of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ; // operator disabled at compile time; use generic path
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
spectra.c | /** @file spectra.c Documented spectra module
*
* Julien Lesgourgues, 25.08.2010
*
* This module computes the anisotropy and Fourier power spectra
* \f$ C_l^{X}, P(k), ... \f$'s given the transfer and Bessel functions
* (for anisotropy spectra), the source functions (for Fourier spectra)
* and the primordial spectra.
*
* The following functions can be called from other modules:
*
* -# spectra_init() at the beginning (but after transfer_init())
* -# spectra_cl_at_l() at any time for computing \f$ C_l \f$ at any l
* -# spectra_spectrum_at_z() at any time for computing P(k) at any z
* -# spectra_spectrum_at_k_and z() at any time for computing P at any k and z
* -# spectra_free() at the end
*/
#include "spectra.h"
// Sum the TT band power (2l+1)*C_l^TT over l = l1..l2, split into the
// contributions of the two initial conditions of the scalar mode:
// *TT_RR (ic 0 x ic 0), *TT_RI (cross, counted twice), *TT_II (ic 1 x ic 1).
// NOTE(review): initial-condition indices 0 and 1 are hard-coded below --
// presumably adiabatic and one isocurvature mode; this assumes
// psp->ic_size[psp->index_md_scalars] >= 2.  Confirm against the caller.
int spectra_bandpower(struct spectra * psp,
int l1,
int l2,
double * TT_II,
double * TT_RI,
double * TT_RR
) {
int l;
int index_md;
double * cl_tot; // total C_l for all types at one l
double ** cl_md; // C_l per mode
double ** cl_md_ic; // C_l per mode and per pair of initial conditions
class_alloc(cl_tot,psp->ct_size*sizeof(double),psp->error_message);
class_alloc(cl_md,psp->md_size*sizeof(double*),psp->error_message);
class_alloc(cl_md_ic,psp->md_size*sizeof(double*),psp->error_message);
for (index_md=0;index_md<psp->md_size; index_md++) {
class_alloc(cl_md[index_md],psp->ct_size*sizeof(double),psp->error_message);
class_alloc(cl_md_ic[index_md],psp->ct_size*psp->ic_ic_size[index_md]*sizeof(double),psp->error_message);
}
*TT_RR=0.;
*TT_RI=0.;
*TT_II=0.;
for (l=l1; l<=l2; l++) {
class_call(spectra_cl_at_l(psp,
(double)l,
cl_tot,
cl_md,
cl_md_ic),
psp->error_message,
psp->error_message);
// weight each multipole by its 2l+1 degenerate m modes; the off-diagonal
// (0,1) element is a half-sum, hence the factor 2 on the cross term
*TT_RR += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(0,0,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt];
*TT_RI += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(0,1,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt]*2.;
*TT_II += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(1,1,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt];
}
for (index_md=0;index_md<psp->md_size; index_md++) {
free(cl_md[index_md]);
free(cl_md_ic[index_md]);
}
free(cl_tot);
free(cl_md);
free(cl_md_ic);
return _SUCCESS_;
}
/**
* Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions.
*
* This routine evaluates all the \f$C_l\f$'s at a given value of l by
* interpolating in the pre-computed table. When relevant, it also
* sums over all initial conditions for each mode, and over all modes.
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param l Input: multipole number
* @param cl_tot Output: total \f$C_l\f$'s for all types (TT, TE, EE, etc..)
* @param cl_md Output: \f$C_l\f$'s for all types (TT, TE, EE, etc..) decomposed mode by mode (scalar, tensor, ...) when relevant
* @param cl_md_ic Output: \f$C_l\f$'s for all types (TT, TE, EE, etc..) decomposed by pairs of initial conditions (adiabatic, isocurvatures) for each mode (usually, only for the scalar mode) when relevant
* @return the error status
*/
// Interpolate all C_l's at multipole l from the pre-computed spline table,
// handling three layouts: (a) single mode + single ic -> fill cl_tot only;
// (b) single mode + several ic's -> fill cl_md_ic[0] and sum into cl_tot;
// (c) several modes -> fill cl_md (and cl_md_ic where a mode has several
// ic's) and sum over modes into cl_tot.  Entries beyond each type's l_max
// (or for ic pairs flagged as zero) are set to 0.
int spectra_cl_at_l(
struct spectra * psp,
double l,
double * cl_tot, /* array with argument cl_tot[index_ct] (must be already allocated) */
double * * cl_md, /* array with argument cl_md[index_md][index_ct] (must be already allocated only if several modes) */
double * * cl_md_ic /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] (must be already allocated for a given mode only if several ic's) */
) {
/** Summary: */
/** - define local variables */
int last_index;
int index_md;
int index_ic1,index_ic2,index_ic1_ic2;
int index_ct;
/** - (a) treat case in which there is only one mode and one initial condition.
Then, only cl_tot needs to be filled. */
if ((psp->md_size == 1) && (psp->ic_size[0] == 1)) {
index_md = 0;
if ((int)l <= psp->l[psp->l_size[index_md]-1]) {
/* interpolate at l */
class_call(array_interpolate_spline(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ddcl[index_md],
psp->ct_size,
l,
&last_index,
cl_tot,
psp->ct_size,
psp->error_message),
psp->error_message,
psp->error_message);
/* set to zero for the types such that l<l_max */
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
if ((int)l > psp->l_max_ct[index_md][index_ct])
cl_tot[index_ct]=0.;
}
else {
/* l beyond the tabulated range: all spectra vanish */
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_tot[index_ct]=0.;
}
}
/** - (b) treat case in which there is only one mode
with several initial condition.
Fill cl_md_ic[index_md=0] and sum it to get cl_tot. */
if ((psp->md_size == 1) && (psp->ic_size[0] > 1)) {
index_md = 0;
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_tot[index_ct]=0.;
/* loop over the upper triangle of (ic1, ic2) pairs */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (((int)l <= psp->l[psp->l_size[index_md]-1]) &&
(psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_)) {
class_call(array_interpolate_spline(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ddcl[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
l,
&last_index,
cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
psp->error_message),
psp->error_message,
psp->error_message);
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
if ((int)l > psp->l_max_ct[index_md][index_ct])
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.;
}
else {
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.;
}
/* compute cl_tot by summing over cl_md_ic */
/* (off-diagonal ic pairs count twice, by symmetry of the ic matrix) */
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
if (index_ic1 == index_ic2)
cl_tot[index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct];
else
cl_tot[index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct];
}
}
}
}
/** - (c) loop over modes */
if (psp->md_size > 1) {
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_tot[index_ct]=0.;
for (index_md = 0; index_md < psp->md_size; index_md++) {
/** - --> (c.1.) treat case in which the mode under consideration
has only one initial condition.
Fill cl_md[index_md]. */
if (psp->ic_size[index_md] == 1) {
if ((int)l <= psp->l[psp->l_size[index_md]-1]) {
class_call(array_interpolate_spline(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ddcl[index_md],
psp->ct_size,
l,
&last_index,
cl_md[index_md],
psp->ct_size,
psp->error_message),
psp->error_message,
psp->error_message);
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
if ((int)l > psp->l_max_ct[index_md][index_ct])
cl_md[index_md][index_ct]=0.;
}
else {
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_md[index_md][index_ct]=0.;
}
}
/** - --> (c.2.) treat case in which the mode under consideration
has several initial conditions.
Fill cl_md_ic[index_md] and sum it to get cl_md[index_md] */
if (psp->ic_size[index_md] > 1) {
if ((int)l <= psp->l[psp->l_size[index_md]-1]) {
/* interpolate all ic and ct */
class_call(array_interpolate_spline(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ddcl[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
l,
&last_index,
cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
psp->error_message),
psp->error_message,
psp->error_message);
/* set to zero some of the components */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
if (((int)l > psp->l_max_ct[index_md][index_ct]) || (psp->is_non_zero[index_md][index_ic1_ic2] == _FALSE_))
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.;
}
}
}
}
/* if l was too big, set anyway all components to zero */
else {
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.;
}
}
}
}
/* sum up all ic for each mode */
/* (again, off-diagonal ic pairs count twice) */
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
cl_md[index_md][index_ct]=0.;
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (index_ic1 == index_ic2)
cl_md[index_md][index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct];
else
cl_md[index_md][index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct];
}
}
}
}
/** - --> (c.3.) add contribution of cl_md[index_md] to cl_tot */
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_tot[index_ct]+=cl_md[index_md][index_ct];
}
}
return _SUCCESS_;
}
/**
* Matter power spectrum for arbitrary redshift and for all initial conditions.
*
* This routine evaluates the matter power spectrum at a given value of z by
* interpolating in the pre-computed table (if several values of z have been stored)
* or by directly reading it (if it only contains values at z=0 and we want P(k,z=0))
*
*
* Can be called in two modes: linear or logarithmic.
*
* - linear: returns P(k) (units: \f$ Mpc^3\f$)
*
* - logarithmic: returns \f$\ln{P(k)}\f$
*
* One little subtlety: in case of several correlated initial conditions,
* the cross-correlation spectrum can be negative. Then, in logarithmic mode,
* the non-diagonal elements contain the cross-correlation angle \f$ P_{12}/\sqrt{P_{11} P_{22}}\f$
* (from -1 to 1) instead of \f$\ln{P_{12}}\f$
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param mode Input: linear or logarithmic
* @param z Input: redshift
* @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ (linear mode), or its logarithms (logarithmic mode)
* @param output_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3 \f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode)
* @return the error status
*/
int spectra_pk_at_z(
struct background * pba,
struct spectra * psp,
enum linear_or_logarithmic mode,
double z,
double * output_tot, /* array with argument output_tot[index_k] (must be already allocated) */
double * output_ic /* array with argument output_tot[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] (must be already allocated only if more than one initial condition) */
) {
/** Summary: */
/** - define local variables */
int index_md;
int last_index;
int index_k;
double tau,ln_tau;
int index_ic1,index_ic2,index_ic1_ic2;
/* the matter power spectrum is defined for scalar perturbations only */
index_md = psp->index_md_scalars;
/** - first step: convert z into \f$\ln{\tau}\f$ */
class_call(background_tau_of_z(pba,z,&tau),
pba->error_message,
psp->error_message);
class_test(tau <= 0.,
psp->error_message,
"negative or null value of conformal time: cannot interpolate");
ln_tau = log(tau);
/** - second step: for both modes (linear or logarithmic), store the spectrum in logarithmic format in the output array(s) */
/** - --> (a) if only values at tau=tau_today are stored and we want \f$ P(k,z=0)\f$, no need to interpolate */
if (psp->ln_tau_size == 1) {
class_test(z != 0.,
psp->error_message,
"asked z=%e but only P(k,z=0) has been tabulated",z);
/* note: the whole if/else below is the (single, un-braced) body of this for loop */
for (index_k=0; index_k<psp->ln_k_size; index_k++)
if (psp->ic_size[index_md] == 1) {
output_tot[index_k] = psp->ln_pk[index_k];
}
else {
for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) {
output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] =
psp->ln_pk[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2];
}
}
}
/** - --> (b) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */
else {
if (psp->ic_ic_size[index_md] == 1) {
/* single initial condition: interpolate ln(P(k)) along tau, straight into output_tot */
class_call(array_interpolate_spline(psp->ln_tau,
psp->ln_tau_size,
psp->ln_pk,
psp->ddln_pk,
psp->ln_k_size,
ln_tau,
&last_index,
output_tot,
psp->ln_k_size,
psp->error_message),
psp->error_message,
psp->error_message);
}
else {
/* several initial conditions: interpolate the whole (k, ic-pair) table into
output_ic; output_tot will be reconstructed from it in the third step below.
Diagonal entries hold ln(P_ii); off-diagonal entries hold the correlation
angle P_12/sqrt(P_11 P_22) (see the header comment of this function). */
class_call(array_interpolate_spline(psp->ln_tau,
psp->ln_tau_size,
psp->ln_pk,
psp->ddln_pk,
psp->ic_ic_size[index_md]*psp->ln_k_size,
ln_tau,
&last_index,
output_ic,
psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->error_message),
psp->error_message,
psp->error_message);
}
}
/** - third step: if there are several initial conditions, compute the total P(k) and set back all uncorrelated coefficients to exactly zero. Check positivity of total P(k). */
if (psp->ic_size[index_md] > 1) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
output_tot[index_k] = 0.;
/* sum over the upper triangle of the symmetric ic1 x ic2 matrix */
for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (index_ic1 == index_ic2) {
/* diagonal entry is stored as ln(P_ii): exponentiate */
output_tot[index_k] += exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]);
}
else {
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
/* off-diagonal entry is a correlation angle: reconstruct
2*P_12 = 2 * angle * sqrt(P_11 * P_22), with P_ii = exp(ln P_ii) */
output_tot[index_k] +=
2. * output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] *
sqrt(exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]) *
exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]));
}
else
/* uncorrelated pair: reset the (possibly interpolated) value to exactly zero */
output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.;
}
}
}
class_test(output_tot[index_k] <= 0.,
psp->error_message,
"for k=%e, z=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total=%e results negative",
exp(psp->ln_k[index_k]),z,output_tot[index_k]);
}
}
/** - fourth step: depending on requested mode (linear or logarithmic), apply necessary transformation to the output arrays */
/** - --> (a) linear mode: if only one initial condition, convert output_pk to linear format; if several initial conditions, convert output_ic to linear format, output_tot is already in this format */
if (mode == linear) {
if (psp->ic_size[index_md] == 1) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
output_tot[index_k] = exp(output_tot[index_k]);
}
}
else {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
/* first pass: exponentiate the diagonal ln(P_ii) entries */
for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);
output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]);
}
/* second pass (order matters: diagonals must already be linear):
turn each correlation angle into P_12 = angle * sqrt(P_11 * P_22) */
for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])] =
output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])]
*sqrt(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])] *
output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]);
}
}
}
}
}
/** - --> (b) logarithmic mode: if only one initial condition, nothing to be done; if several initial conditions, convert output_tot to logarithmic format, output_ic is already in this format */
else {
if (psp->ic_size[index_md] > 1) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
/* we have already checked above that output_tot was positive */
output_tot[index_k] = log(output_tot[index_k]);
}
}
}
return _SUCCESS_;
}
/**
* Matter power spectrum for arbitrary wavenumber, redshift and initial condition.
*
* This routine evaluates the matter power spectrum at a given value of k and z by
* interpolating in a table of all P(k)'s computed at this z by spectra_pk_at_z() (when kmin <= k <= kmax),
* or eventually by using directly the primordial spectrum (when 0 <= k < kmin):
* the latter case is an approximation, valid when kmin << comoving Hubble scale today.
* Returns zero when k=0. Returns an error when k<0 or k > kmax.
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param k Input: wavenumber in 1/Mpc
* @param z Input: redshift
* @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$
* @param pk_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3\f$
* @return the error status
*/
int spectra_pk_at_k_and_z(
struct background * pba,
struct primordial * ppm,
struct spectra * psp,
double k,
double z,
double * pk_tot, /* pointer to a single number (must be already allocated) */
double * pk_ic /* array of argument pk_ic[index_ic1_ic2] (must be already allocated only if several initial conditions) */
) {
/** Summary: */
/** - define local variables */
int index_md;
int index_k;
int last_index;
int index_ic1,index_ic2,index_ic1_ic2;
/* work buffers, allocated only on the branches that need them and freed there */
double * spectrum_at_z = NULL;
double * spectrum_at_z_ic = NULL;
double * spline;
double * pk_primordial_k = NULL;
double kmin;
double * pk_primordial_kmin = NULL;
index_md = psp->index_md_scalars;
/** - first step: check that k is in valid range [0:kmax] (the test for z will be done when calling spectra_pk_at_z()) */
class_test((k < 0.) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
psp->error_message,
"k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1]));
/** - deal with case 0 <= k < kmin */
if (k < exp(psp->ln_k[0])) {
/** - --> (a) subcase k=0: then P(k)=0 */
if (k == 0.) {
if (psp->ic_size[index_md] == 1) {
*pk_tot=0.;
}
else {
/* NOTE(review): with several initial conditions, all pk_ic entries are
zeroed here, so the final summation step below yields *pk_tot = 0 and
the positivity class_test at the end of this function will trigger an
error. Confirm whether k==0 is meant to be allowed in the multi-IC case. */
for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) {
pk_ic[index_ic1_ic2] = 0.;
}
}
}
/** - --> (b) subcase 0<k<kmin: in this case we know that on super-Hubble scales:
* P(k) = [some number] * k * P_primordial(k)
* so
* P(k) = P(kmin) * (k P_primordial(k)) / (kmin P_primordial(kmin))
* (note that the result is accurate only if kmin is such that [a0 kmin] << H0)
*/
else {
/* compute P(k,z) which contains P(kmin,z)*/
class_alloc(spectrum_at_z,
psp->ln_k_size*sizeof(double),
psp->error_message);
if (psp->ic_size[index_md] > 1) {
class_alloc(spectrum_at_z_ic,
sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->error_message);
}
/* linear mode: spectrum_at_z_ic (when allocated) comes back in linear format */
class_call(spectra_pk_at_z(pba,
psp,
linear,
z,
spectrum_at_z,
spectrum_at_z_ic),
psp->error_message,
psp->error_message);
/* compute P_primordial(k) */
class_alloc(pk_primordial_k,
sizeof(double)*psp->ic_ic_size[index_md],
psp->error_message);
class_call(primordial_spectrum_at_k(ppm,
index_md,
linear,
k,
pk_primordial_k),
ppm->error_message,psp->error_message);
/* compute P_primordial(kmin) */
kmin = exp(psp->ln_k[0]);
class_alloc(pk_primordial_kmin,
sizeof(double)*psp->ic_ic_size[index_md],
psp->error_message);
class_call(primordial_spectrum_at_k(ppm,
index_md,
linear,
kmin,
pk_primordial_kmin),
ppm->error_message,
psp->error_message);
/* apply above analytic approximation for P(k) */
/* index_k=0 corresponds to kmin, the first tabulated wavenumber */
index_k=0;
if (psp->ic_size[index_md] == 1) {
index_ic1_ic2 = 0;
*pk_tot = spectrum_at_z[index_k]
*k*pk_primordial_k[index_ic1_ic2]
/kmin/pk_primordial_kmin[index_ic1_ic2];
}
else {
for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) {
pk_ic[index_ic1_ic2] = spectrum_at_z_ic[index_ic1_ic2]
*k*pk_primordial_k[index_ic1_ic2]
/kmin/pk_primordial_kmin[index_ic1_ic2];
}
}
/* release all buffers allocated on this branch */
free(spectrum_at_z);
if (psp->ic_size[index_md] > 1)
free(spectrum_at_z_ic);
free(pk_primordial_k);
free(pk_primordial_kmin);
}
}
/** - deal with case kmin <= k <= kmax */
else {
/* compute P(k,z) (in logarithmic format for more accurate interpolation) */
class_alloc(spectrum_at_z,
psp->ln_k_size*sizeof(double),
psp->error_message);
if (psp->ic_size[index_md] > 1) {
class_alloc(spectrum_at_z_ic,
sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->error_message);
}
class_call(spectra_pk_at_z(pba,
psp,
logarithmic,
z,
spectrum_at_z,
spectrum_at_z_ic),
psp->error_message,
psp->error_message);
/* get its second derivatives with spline, then interpolate, then convert to linear format */
class_alloc(spline,
sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->error_message);
if (psp->ic_size[index_md] == 1) {
/* single initial condition: spline/interpolate the one ln(P) column */
class_call(array_spline_table_lines(psp->ln_k,
psp->ln_k_size,
spectrum_at_z,
1,
spline,
_SPLINE_NATURAL_,
psp->error_message),
psp->error_message,
psp->error_message);
class_call(array_interpolate_spline(psp->ln_k,
psp->ln_k_size,
spectrum_at_z,
spline,
1,
log(k),
&last_index,
pk_tot,
1,
psp->error_message),
psp->error_message,
psp->error_message);
*pk_tot = exp(*pk_tot);
}
else {
/* several initial conditions: interpolate all ic-pair columns at once */
class_call(array_spline_table_lines(psp->ln_k,
psp->ln_k_size,
spectrum_at_z_ic,
psp->ic_ic_size[index_md],
spline,
_SPLINE_NATURAL_,
psp->error_message),
psp->error_message,
psp->error_message);
class_call(array_interpolate_spline(psp->ln_k,
psp->ln_k_size,
spectrum_at_z_ic,
spline,
psp->ic_ic_size[index_md],
log(k),
&last_index,
pk_ic,
psp->ic_ic_size[index_md],
psp->error_message),
psp->error_message,
psp->error_message);
/* first pass: diagonal entries were interpolated as ln(P_ii); exponentiate */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);
pk_ic[index_ic1_ic2] = exp(pk_ic[index_ic1_ic2]);
}
/* second pass (diagonals must already be linear): turn each interpolated
correlation angle into P_12 = angle * sqrt(P_11 * P_22), or exactly zero
for uncorrelated pairs */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
pk_ic[index_ic1_ic2] = pk_ic[index_ic1_ic2]*
sqrt(pk_ic[index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]*
pk_ic[index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]);
}
else {
pk_ic[index_ic1_ic2] = 0.;
}
}
}
free(spectrum_at_z_ic);
}
free(spectrum_at_z);
free(spline);
}
/** - last step: if more than one condition, sum over pk_ic to get pk_tot, and set back coefficients of non-correlated pairs to exactly zero. */
if (psp->ic_size[index_md] > 1) {
*pk_tot = 0.;
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
/* off-diagonal pairs count twice (symmetric matrix, upper triangle only) */
if (index_ic1 == index_ic2)
*pk_tot += pk_ic[index_ic1_ic2];
else
*pk_tot += 2.*pk_ic[index_ic1_ic2];
}
else {
pk_ic[index_ic1_ic2] = 0.;
}
}
}
class_test(*pk_tot <= 0.,
psp->error_message,
"for k=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total results negative",k);
}
return _SUCCESS_;
}
/**
* Non-linear total matter power spectrum for arbitrary redshift.
*
* This routine evaluates the non-linear matter power spectrum at a given value of z by
* interpolating in the pre-computed table (if several values of z have been stored)
* or by directly reading it (if it only contains values at z=0 and we want P(k,z=0))
*
*
* Can be called in two modes: linear or logarithmic.
*
* - linear: returns P(k) (units: Mpc^3)
*
* - logarithmic: returns ln(P(k))
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param mode Input: linear or logarithmic
* @param z Input: redshift
* @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ (linear mode), or its logarithms (logarithmic mode)
* @return the error status
*/
int spectra_pk_nl_at_z(
                       struct background * pba,
                       struct spectra * psp,
                       enum linear_or_logarithmic mode,
                       double z,
                       double * output_tot /* array with argument output_tot[index_k] (must be already allocated) */
                       ) {

  /** Summary: */

  /** - define local variables */

  int ik;
  int last_index;
  double tau;
  double log_tau;

  /** - convert the requested redshift into log(conformal time) */

  class_call(background_tau_of_z(pba,z,&tau),
             pba->error_message,
             psp->error_message);

  class_test(tau <= 0.,
             psp->error_message,
             "negative or null value of conformal time: cannot interpolate");

  log_tau = log(tau);

  /** - fill output_tot with ln(P_nl(k,z)): either read the single tabulated
      slice directly (when only z=0 has been stored), or spline-interpolate
      along tau (when several times have been stored) */

  if (psp->ln_tau_size == 1) {

    class_test(z != 0.,
               psp->error_message,
               "asked z=%e but only P(k,z=0) has been tabulated",z);

    for (ik=0; ik<psp->ln_k_size; ik++) {
      output_tot[ik] = psp->ln_pk_nl[ik];
    }
  }
  else {

    class_call(array_interpolate_spline(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->ln_pk_nl,
                                        psp->ddln_pk_nl,
                                        psp->ln_k_size,
                                        log_tau,
                                        &last_index,
                                        output_tot,
                                        psp->ln_k_size,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  /** - in linear mode, exponentiate so that P(k) is returned instead of ln(P(k)) */

  if (mode == linear) {
    for (ik=0; ik<psp->ln_k_size; ik++) {
      output_tot[ik] = exp(output_tot[ik]);
    }
  }

  return _SUCCESS_;
}
/**
* Non-linear total matter power spectrum for arbitrary wavenumber and redshift.
*
* This routine evaluates the non-linear matter power spectrum at a given value
* of k and z by interpolating in a table of all P(k)'s computed at this z by
* spectra_pk_nl_at_z(). Unlike its linear counterpart spectra_pk_at_k_and_z(),
* it performs no small-k extrapolation from the primordial spectrum: the
* wavenumber must satisfy kmin <= k <= kmax, and an error is returned when
* k < kmin or k > kmax.
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param k Input: wavenumber in 1/Mpc
* @param z Input: redshift
* @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$
* @return the error status
*/
int spectra_pk_nl_at_k_and_z(
                             struct background * pba,
                             struct primordial * ppm, /* unused here; kept for interface symmetry with spectra_pk_at_k_and_z() */
                             struct spectra * psp,
                             double k,
                             double z,
                             double * pk_tot /* pointer to a single number (must be already allocated) */
                             ) {

  /** Summary: */

  /** - define local variables */

  int last_index;
  double * spectrum_at_z = NULL;
  double * spline = NULL;

  /** - check that k is in valid range [kmin:kmax] (the test for z will be
      done when calling spectra_pk_nl_at_z()); unlike the linear version,
      this routine has no analytic extrapolation below kmin, so k < kmin
      is an error. The error message now reports the true lower bound
      (previously it printed 0 although the test used kmin). */

  class_test((k < exp(psp->ln_k[0])) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
             psp->error_message,
             "k=%e out of bounds [%e:%e]",k,exp(psp->ln_k[0]),exp(psp->ln_k[psp->ln_k_size-1]));

  /** - compute ln(P_nl(k,z)) at all tabulated k (logarithmic format for
      more accurate interpolation) */

  class_alloc(spectrum_at_z,
              psp->ln_k_size*sizeof(double),
              psp->error_message);

  class_call(spectra_pk_nl_at_z(pba,
                                psp,
                                logarithmic,
                                z,
                                spectrum_at_z),
             psp->error_message,
             psp->error_message);

  /** - get its second derivatives with spline, then interpolate, then
      convert to linear format. Only one column (the total spectrum) is
      splined, so the buffer needs exactly ln_k_size doubles — previously
      it was over-allocated by a factor ic_ic_size. */

  class_alloc(spline,
              psp->ln_k_size*sizeof(double),
              psp->error_message);

  class_call(array_spline_table_lines(psp->ln_k,
                                      psp->ln_k_size,
                                      spectrum_at_z,
                                      1,
                                      spline,
                                      _SPLINE_NATURAL_,
                                      psp->error_message),
             psp->error_message,
             psp->error_message);

  class_call(array_interpolate_spline(psp->ln_k,
                                      psp->ln_k_size,
                                      spectrum_at_z,
                                      spline,
                                      1,
                                      log(k),
                                      &last_index,
                                      pk_tot,
                                      1,
                                      psp->error_message),
             psp->error_message,
             psp->error_message);

  *pk_tot = exp(*pk_tot);

  free(spectrum_at_z);
  free(spline);

  return _SUCCESS_;
}
/**
* Matter transfer functions \f$ T_i(k) \f$ for arbitrary redshift and for all
* initial conditions.
*
* This routine evaluates the matter transfer functions at a given value of z by
* interpolating in the pre-computed table (if several values of z have been stored)
* or by directly reading it (if it only contains values at z=0 and we want \f$ T_i(k,z=0)\f$)
*
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param z Input: redshift
* @param output Output: matter transfer functions
* @return the error status
*/
int spectra_tk_at_z(
                    struct background * pba,
                    struct spectra * psp,
                    double z,
                    double * output /* array with argument output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] (must be already allocated) */
                    ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int last_index;
  int ik;
  int itr;
  int iic;
  int flat;
  double tau;
  double log_tau;

  /* matter transfer functions are stored for scalar modes */
  index_md = psp->index_md_scalars;

  /** - convert the requested redshift into log(conformal time) */

  class_call(background_tau_of_z(pba,z,&tau),
             pba->error_message,
             psp->error_message);

  class_test(tau <= 0.,
             psp->error_message,
             "negative or null value of conformal time: cannot interpolate");

  log_tau = log(tau);

  /** - fill the output array: either copy the single tabulated slice
      (only z=0 stored) or spline-interpolate along tau */

  if (psp->ln_tau_size == 1) {

    class_test(z != 0.,
               psp->error_message,
               "asked z=%e but only T_i(k,z=0) has been tabulated",z);

    for (ik=0; ik<psp->ln_k_size; ik++) {
      for (itr=0; itr<psp->tr_size; itr++) {
        for (iic = 0; iic < psp->ic_size[index_md]; iic++) {
          /* flattened index: (k, ic, transfer-type) */
          flat = (ik*psp->ic_size[index_md]+iic)*psp->tr_size+itr;
          output[flat] = psp->matter_transfer[flat];
        }
      }
    }
  }
  else {

    class_call(array_interpolate_spline(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->matter_transfer,
                                        psp->ddmatter_transfer,
                                        psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size,
                                        log_tau,
                                        &last_index,
                                        output,
                                        psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  return _SUCCESS_;
}
/**
* Matter transfer functions \f$ T_i(k)\f$ for arbitrary wavenumber, redshift
* and initial condition.
*
* This routine evaluates the matter transfer functions at a given
* value of k and z by interpolating in a table of all \f$ T_i(k,z)\f$'s
* computed at this z by spectra_tk_at_z() (when kmin <= k <= kmax).
* Returns an error when k<kmin or k > kmax.
*
* This function can be called from whatever module at whatever time,
* provided that spectra_init() has been called before, and
* spectra_free() has not been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param k Input: wavenumber in 1/Mpc
* @param z Input: redshift
* @param output Output: matter transfer functions
* @return the error status
*/
int spectra_tk_at_k_and_z(
                          struct background * pba,
                          struct spectra * psp,
                          double k,
                          double z,
                          double * output /* array with argument output[index_ic*psp->tr_size+index_tr] (must be already allocated) */
                          ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int last_index;
  int n_columns;
  double * transfer_at_z;
  double * ddtransfer_at_z;

  index_md = psp->index_md_scalars;

  /* number of transfer-function columns stored per wavenumber */
  n_columns = psp->tr_size*psp->ic_size[index_md];

  /** - check that k is in valid range [0:kmax] (the test for z will be done when calling spectra_tk_at_z()) */

  class_test((k < 0.) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
             psp->error_message,
             "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1]));

  /** - compute the full table T_i(k,z) at the requested redshift */

  class_alloc(transfer_at_z,
              psp->ln_k_size*n_columns*sizeof(double),
              psp->error_message);

  class_call(spectra_tk_at_z(pba,
                             psp,
                             z,
                             transfer_at_z),
             psp->error_message,
             psp->error_message);

  /** - spline the table along k, then interpolate all columns at log(k) */

  class_alloc(ddtransfer_at_z,
              psp->ln_k_size*n_columns*sizeof(double),
              psp->error_message);

  class_call(array_spline_table_lines(psp->ln_k,
                                      psp->ln_k_size,
                                      transfer_at_z,
                                      n_columns,
                                      ddtransfer_at_z,
                                      _SPLINE_NATURAL_,
                                      psp->error_message),
             psp->error_message,
             psp->error_message);

  class_call(array_interpolate_spline(psp->ln_k,
                                      psp->ln_k_size,
                                      transfer_at_z,
                                      ddtransfer_at_z,
                                      n_columns,
                                      log(k),
                                      &last_index,
                                      output,
                                      n_columns,
                                      psp->error_message),
             psp->error_message,
             psp->error_message);

  free(transfer_at_z);
  free(ddtransfer_at_z);

  return _SUCCESS_;
}
/**
* This routine initializes the spectra structure (in particular,
* computes table of anisotropy and Fourier spectra \f$ C_l^{X}, P(k), ... \f$)
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure (will provide H, Omega_m at redshift of interest)
* @param ppt Input: pointer to perturbation structure
* @param ptr Input: pointer to transfer structure
* @param ppm Input: pointer to primordial structure
* @param pnl Input: pointer to nonlinear structure
* @param psp Output: pointer to initialized spectra structure
* @return the error status
*/
int spectra_init(
struct precision * ppr,
struct background * pba,
struct perturbs * ppt,
struct primordial * ppm,
struct nonlinear *pnl,
struct transfers * ptr,
struct spectra * psp
) {
/** Summary: */
double TT_II,TT_RI,TT_RR;
int l1,l2;
/** - check that we really want to compute at least one spectrum */
if ((ppt->has_cls == _FALSE_) &&
(ppt->has_pk_matter == _FALSE_) &&
(ppt->has_density_transfers == _FALSE_) &&
(ppt->has_velocity_transfers == _FALSE_)) {
psp->md_size = 0;
if (psp->spectra_verbose > 0)
printf("No spectra requested. Spectra module skipped.\n");
return _SUCCESS_;
}
else {
if (psp->spectra_verbose > 0)
printf("Computing unlensed linear spectra\n");
}
/** - initialize indices and allocate some of the arrays in the
spectra structure */
class_call(spectra_indices(pba,ppt,ptr,ppm,psp),
psp->error_message,
psp->error_message);
/** - deal with \f$ C_l\f$'s, if any */
if (ppt->has_cls == _TRUE_) {
class_call(spectra_cls(pba,ppt,ptr,ppm,psp),
psp->error_message,
psp->error_message);
}
else {
psp->ct_size=0;
}
/** - deal with \f$ P(k,\tau)\f$ and \f$ T_i(k,\tau)\f$ */
if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) {
class_call(spectra_k_and_tau(pba,ppt,psp),
psp->error_message,
psp->error_message);
if (ppt->has_pk_matter == _TRUE_) {
class_call(spectra_pk(pba,ppt,ppm,pnl,psp),
psp->error_message,
psp->error_message);
}
else {
psp->ln_pk=NULL;
}
if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) {
class_call(spectra_matter_transfers(pba,ppt,psp),
psp->error_message,
psp->error_message);
}
else {
psp->matter_transfer=NULL;
}
}
else {
psp->ln_k_size=0;
}
/* if there is one isocurvature mode, compute and store in the psp
structure the isocurvature contribution to some bandpowers in
different ranges of l, and the contribution to the primordial
spectrum at different wavenumbers (used in the Planck
analysis) */
if ((ppt->has_scalars == _TRUE_) && (ppt->has_cls == _TRUE_) && (ppt->ic_size[ppt->index_md_scalars] == 2)) {
l1=2;
l2=20;
class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
psp->error_message,
psp->error_message);
class_test(TT_II+TT_RI+TT_RR==0.,
psp->error_message,
"should never happen");
psp->alpha_II_2_20=TT_II/(TT_II+TT_RI+TT_RR);
psp->alpha_RI_2_20=TT_RI/(TT_II+TT_RI+TT_RR);
psp->alpha_RR_2_20=TT_RR/(TT_II+TT_RI+TT_RR);
l1=21;
l2=200;
class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
psp->error_message,
psp->error_message);
class_test(TT_II+TT_RI+TT_RR==0.,
psp->error_message,
"should never happen");
psp->alpha_II_21_200=TT_II/(TT_II+TT_RI+TT_RR);
psp->alpha_RI_21_200=TT_RI/(TT_II+TT_RI+TT_RR);
psp->alpha_RR_21_200=TT_RR/(TT_II+TT_RI+TT_RR);
l1=201;
l2=2500;
class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
psp->error_message,
psp->error_message);
class_test(TT_II+TT_RI+TT_RR==0.,
psp->error_message,
"should never happen");
psp->alpha_II_201_2500=TT_II/(TT_II+TT_RI+TT_RR);
psp->alpha_RI_201_2500=TT_RI/(TT_II+TT_RI+TT_RR);
psp->alpha_RR_201_2500=TT_RR/(TT_II+TT_RI+TT_RR);
l1=2;
l2=2500;
class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
psp->error_message,
psp->error_message);
class_test(TT_II+TT_RI+TT_RR==0.,
psp->error_message,
"should never happen");
psp->alpha_II_2_2500=TT_II/(TT_II+TT_RI+TT_RR);
psp->alpha_RI_2_2500=TT_RI/(TT_II+TT_RI+TT_RR);
psp->alpha_RR_2_2500=TT_RR/(TT_II+TT_RI+TT_RR);
if (ppt->has_cdi==_TRUE_) {
psp->alpha_kp=ppm->f_cdi*ppm->f_cdi
/(1.+ppm->f_cdi*ppm->f_cdi);
psp->alpha_k1=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot))
/(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot)));
psp->alpha_k2=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot))
/(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot)));
}
if (ppt->has_nid==_TRUE_) {
psp->alpha_kp=ppm->f_nid*ppm->f_nid
/(1.+ppm->f_nid*ppm->f_nid);
psp->alpha_k1=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot))
/(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot)));
psp->alpha_k2=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot))
/(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot)));
}
if (ppt->has_niv==_TRUE_) {
psp->alpha_kp=ppm->f_niv*ppm->f_niv
/(1.+ppm->f_niv*ppm->f_niv);
psp->alpha_k1=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot))
/(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot)));
psp->alpha_k2=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot))
/(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot)));
}
}
return _SUCCESS_;
}
/**
* This routine frees all the memory space allocated by spectra_init().
*
* To be called at the end of each run, only when no further calls to
* spectra_cls_at_l(), spectra_pk_at_z(), spectra_pk_at_k_and_z() are needed.
*
* @param psp Input: pointer to spectra structure (which fields must be freed)
* @return the error status
*/
int spectra_free(
struct spectra * psp
) {
int index_md;
/* free only what spectra_init() actually allocated: md_size > 0 implies
spectra_indices() ran; ct_size > 0 implies the C_l arrays exist;
ln_k_size > 0 implies the P(k)/transfer tables exist */
if (psp->md_size > 0) {
if (psp->ct_size > 0) {
for (index_md = 0; index_md < psp->md_size; index_md++) {
free(psp->l_max_ct[index_md]);
free(psp->cl[index_md]);
free(psp->ddcl[index_md]);
}
free(psp->l);
free(psp->l_size);
free(psp->l_max_ct);
free(psp->l_max);
free(psp->cl);
free(psp->ddcl);
}
if (psp->ln_k_size > 0) {
free(psp->ln_tau);
free(psp->ln_k);
if (psp->ln_pk != NULL) {
free(psp->ln_pk);
/* second derivatives are only allocated when several tau values were splined */
if (psp->ln_tau_size > 1) {
free(psp->ddln_pk);
}
/* NOTE(review): ln_pk_nl is tested only inside the ln_pk != NULL branch;
presumably ln_pk_nl is only assigned when ln_pk is computed (spectra_init()
sets ln_pk=NULL without touching ln_pk_nl when P(k) is not requested), so
do not flatten this nesting without confirming that invariant */
if (psp->ln_pk_nl != NULL) {
free(psp->ln_pk_nl);
if (psp->ln_tau_size > 1) {
free(psp->ddln_pk_nl);
}
}
}
if (psp->matter_transfer != NULL) {
free(psp->matter_transfer);
if (psp->ln_tau_size > 1) {
free(psp->ddmatter_transfer);
}
}
}
}
/* NOTE(review): these are freed even when md_size == 0, in which case
spectra_indices() never allocated them (spectra_init() returns early);
this is safe only if the structure was zero-initialized — confirm */
for (index_md=0; index_md < psp->md_size; index_md++)
free(psp->is_non_zero[index_md]);
free(psp->is_non_zero);
free(psp->ic_size);
free(psp->ic_ic_size);
return _SUCCESS_;
}
/**
* This routine defines indices and allocates tables in the spectra structure
*
* @param pba Input: pointer to background structure
* @param ppt Input: pointer to perturbation structure
* @param ptr Input: pointer to transfers structure
* @param ppm Input: pointer to primordial structure
* @param psp Input/output: pointer to spectra structure
* @return the error status
*/
int spectra_indices(
                    struct background * pba,
                    struct perturbs * ppt,
                    struct transfers * ptr,
                    struct primordial * ppm,
                    struct spectra * psp
                    ){

  int index_ct;
  int index_md;
  int index_ic1_ic2;
  int index_tr;

  psp->md_size = ppt->md_size;
  if (ppt->has_scalars == _TRUE_)
    psp->index_md_scalars = ppt->index_md_scalars;

  /* copy initial-condition bookkeeping (number of ic's and ic pairs per
     mode, plus the non-zero-correlation flags) from the primordial module */
  class_alloc(psp->ic_size,
              sizeof(int)*psp->md_size,
              psp->error_message);

  class_alloc(psp->ic_ic_size,
              sizeof(int)*psp->md_size,
              psp->error_message);

  class_alloc(psp->is_non_zero,
              sizeof(short *)*psp->md_size,
              psp->error_message);

  for (index_md=0; index_md < psp->md_size; index_md++) {
    psp->ic_size[index_md] = ppm->ic_size[index_md];
    psp->ic_ic_size[index_md] = ppm->ic_ic_size[index_md];
    class_alloc(psp->is_non_zero[index_md],
                sizeof(short)*psp->ic_ic_size[index_md],
                psp->error_message);
    for (index_ic1_ic2=0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++)
      psp->is_non_zero[index_md][index_ic1_ic2] = ppm->is_non_zero[index_md][index_ic1_ic2];
  }

  if (ppt->has_cls == _TRUE_) {

    /* types of C_l's relevant for both scalars and tensors: TT, EE, TE */

    index_ct=0;

    if (ppt->has_cl_cmb_temperature == _TRUE_) {
      psp->has_tt = _TRUE_;
      psp->index_ct_tt=index_ct;
      index_ct++;
    }
    else {
      psp->has_tt = _FALSE_;
    }

    if (ppt->has_cl_cmb_polarization == _TRUE_) {
      psp->has_ee = _TRUE_;
      psp->index_ct_ee=index_ct;
      index_ct++;
    }
    else {
      psp->has_ee = _FALSE_;
    }

    if ((ppt->has_cl_cmb_temperature == _TRUE_) &&
        (ppt->has_cl_cmb_polarization == _TRUE_)) {
      psp->has_te = _TRUE_;
      psp->index_ct_te=index_ct;
      index_ct++;
    }
    else {
      psp->has_te = _FALSE_;
    }

    if (ppt->has_cl_cmb_polarization == _TRUE_) {
      psp->has_bb = _TRUE_;
      psp->index_ct_bb=index_ct;
      index_ct++;
    }
    else {
      psp->has_bb = _FALSE_;
    }

    /* types of C_l's relevant only for scalars: phi-phi, T-phi, E-phi, d-d, T-d */

    if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_pp = _TRUE_;
      psp->index_ct_pp=index_ct;
      index_ct++;
    }
    else {
      psp->has_pp = _FALSE_;
    }

    if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_tp = _TRUE_;
      psp->index_ct_tp=index_ct;
      index_ct++;
    }
    else {
      psp->has_tp = _FALSE_;
    }

    /* NOTE: a premature `psp->ct_size = index_ct;` used to sit here; it was
       dead (overwritten below after all types are counted) and misleading,
       so it has been removed. ct_size is set once, after the last type. */

    if ((ppt->has_cl_cmb_polarization == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_ep = _TRUE_;
      psp->index_ct_ep=index_ct;
      index_ct++;
    }
    else {
      psp->has_ep = _FALSE_;
    }

    /* number of redshift bins for number-count / lensing C_l's */
    if ((ppt->has_scalars == _TRUE_) &&
        ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)))
      psp->d_size=ppt->selection_num;
    else
      psp->d_size=0;

    if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_dd = _TRUE_;
      psp->index_ct_dd=index_ct;
      /* number of (d1,d2) pairs with |d1-d2| <= non_diag */
      index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
    }
    else {
      psp->has_dd = _FALSE_;
    }

    /* the computation of C_l^Td would require a very good sampling of
       transfer functions over a wide range, and a huge computation
       time. In the current version, we prefer to switch it off, rather
       than either slowing down the code considerably, or producing
       very inaccurate spectra.

       if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
       psp->has_td = _TRUE_;
       psp->index_ct_td=index_ct;
       index_ct+=psp->d_size;
       }
       else {
       psp->has_td = _FALSE_;
       }
    */
    psp->has_td = _FALSE_;

    if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_pd = _TRUE_;
      psp->index_ct_pd=index_ct;
      index_ct+=psp->d_size;
    }
    else {
      psp->has_pd = _FALSE_;
    }

    /* (a duplicate `psp->has_td = _FALSE_;` used to be repeated here;
       it is already set once above, so the redundant line was removed) */

    if ((ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_ll = _TRUE_;
      psp->index_ct_ll=index_ct;
      index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
    }
    else {
      psp->has_ll = _FALSE_;
    }

    /* the computation of C_l^Tl would require a very good sampling of
       transfer functions over a wide range, and a huge computation
       time. In the current version, we prefer to switch it off, rather
       than either slowing down the code considerably, or producing
       very inaccurate spectra.

       if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
       psp->has_tl = _TRUE_;
       psp->index_ct_tl=index_ct;
       index_ct+=psp->d_size;
       }
       else {
       psp->has_tl = _FALSE_;
       }
    */
    psp->has_tl = _FALSE_;

    if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_dl = _TRUE_;
      psp->index_ct_dl=index_ct;
      /* dl is not symmetric in (d1,d2): count the full band, not half */
      index_ct += psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag);
    }
    else {
      psp->has_dl = _FALSE_;
    }

    /* total number of C_l types, set once after all types are counted */
    psp->ct_size = index_ct;

    /* infer from input quantities the l_max for each mode and type,
       l_max_ct[index_md][index_type]. Maximize it over index_ct, and
       then over index_md. */

    class_alloc(psp->l_max,sizeof(int*)*psp->md_size,psp->error_message);
    class_alloc(psp->l_max_ct,sizeof(int*)*psp->md_size,psp->error_message);
    for (index_md=0; index_md<psp->md_size; index_md++) {
      class_calloc(psp->l_max_ct[index_md],psp->ct_size,sizeof(int),psp->error_message);
    }

    if (ppt->has_scalars == _TRUE_) {

      /* spectra computed up to l_scalar_max */

      if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tt] = ppt->l_scalar_max;
      if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ee] = ppt->l_scalar_max;
      if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_te] = ppt->l_scalar_max;
      if (psp->has_pp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_pp] = ppt->l_scalar_max;
      if (psp->has_tp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tp] = ppt->l_scalar_max;
      if (psp->has_ep == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ep] = ppt->l_scalar_max;

      /* spectra computed up to l_lss_max */

      if (psp->has_dd == _TRUE_)
        for (index_ct=psp->index_ct_dd;
             index_ct<psp->index_ct_dd+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;

      if (psp->has_td == _TRUE_)
        for (index_ct=psp->index_ct_td;
             index_ct<psp->index_ct_td+psp->d_size;
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);

      if (psp->has_pd == _TRUE_)
        for (index_ct=psp->index_ct_pd;
             index_ct<psp->index_ct_pd+psp->d_size;
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);

      if (psp->has_ll == _TRUE_)
        for (index_ct=psp->index_ct_ll;
             index_ct<psp->index_ct_ll+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;

      if (psp->has_tl == _TRUE_)
        for (index_ct=psp->index_ct_tl;
             index_ct<psp->index_ct_tl+psp->d_size;
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);

      if (psp->has_dl == _TRUE_)
        for (index_ct=psp->index_ct_dl;
             index_ct < psp->index_ct_dl+(psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag));
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;
    }

    if (ppt->has_tensors == _TRUE_) {

      /* spectra computed up to l_tensor_max */

      if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_tt] = ppt->l_tensor_max;
      if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_ee] = ppt->l_tensor_max;
      if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_te] = ppt->l_tensor_max;
      if (psp->has_bb == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_bb] = ppt->l_tensor_max;
    }

    /* maximizations (integer quantities: use integer literals,
       not the `0.` double literals that used to be here) */
    psp->l_max_tot = 0;
    for (index_md=0; index_md < psp->md_size; index_md++) {
      psp->l_max[index_md] = 0;
      for (index_ct=0; index_ct<psp->ct_size; index_ct++)
        psp->l_max[index_md] = MAX(psp->l_max[index_md],psp->l_max_ct[index_md][index_ct]);
      psp->l_max_tot = MAX(psp->l_max_tot,psp->l_max[index_md]);
    }
  }

  /* indices for species associated with a matter transfer function in Fourier space */

  index_tr=0;
  class_define_index(psp->index_tr_delta_g,ppt->has_source_delta_g,index_tr,1);
  class_define_index(psp->index_tr_delta_b,ppt->has_source_delta_b,index_tr,1);
  class_define_index(psp->index_tr_delta_cdm,ppt->has_source_delta_cdm,index_tr,1);
  class_define_index(psp->index_tr_delta_dcdm,ppt->has_source_delta_dcdm,index_tr,1);
  class_define_index(psp->index_tr_delta_scf,ppt->has_source_delta_scf,index_tr,1);
  class_define_index(psp->index_tr_delta_fld,ppt->has_source_delta_fld,index_tr,1);
  class_define_index(psp->index_tr_delta_ur,ppt->has_source_delta_ur,index_tr,1);
  class_define_index(psp->index_tr_delta_dr,ppt->has_source_delta_dr,index_tr,1);
  class_define_index(psp->index_tr_delta_ncdm1,ppt->has_source_delta_ncdm,index_tr,pba->N_ncdm);
  class_define_index(psp->index_tr_delta_tot,ppt->has_density_transfers,index_tr,1);
  class_define_index(psp->index_tr_phi,ppt->has_source_phi,index_tr,1);
  class_define_index(psp->index_tr_psi,ppt->has_source_psi,index_tr,1);

  /* indices for species associated with a velocity transfer function in Fourier space */

  class_define_index(psp->index_tr_theta_g,ppt->has_source_theta_g,index_tr,1);
  class_define_index(psp->index_tr_theta_b,ppt->has_source_theta_b,index_tr,1);
  class_define_index(psp->index_tr_theta_cdm,ppt->has_source_theta_cdm,index_tr,1);
  class_define_index(psp->index_tr_theta_dcdm,ppt->has_source_theta_dcdm,index_tr,1);
  class_define_index(psp->index_tr_theta_scf,ppt->has_source_theta_scf,index_tr,1);
  class_define_index(psp->index_tr_theta_fld,ppt->has_source_theta_fld,index_tr,1);
  class_define_index(psp->index_tr_theta_ur,ppt->has_source_theta_ur,index_tr,1);
  class_define_index(psp->index_tr_theta_dr,ppt->has_source_theta_dr,index_tr,1);
  class_define_index(psp->index_tr_theta_ncdm1,ppt->has_source_theta_ncdm,index_tr,pba->N_ncdm);
  class_define_index(psp->index_tr_theta_tot,ppt->has_velocity_transfers,index_tr,1);

  psp->tr_size = index_tr;

  return _SUCCESS_;
}
/**
* This routine computes a table of values for all harmonic spectra \f$ C_l \f$'s,
* given the transfer functions and primordial spectra.
*
* @param pba Input: pointer to background structure
* @param ppt Input: pointer to perturbation structure
* @param ptr Input: pointer to transfers structure
* @param ppm Input: pointer to primordial structure
* @param psp Input/Output: pointer to spectra structure
* @return the error status
*/
int spectra_cls(
struct background * pba,
struct perturbs * ppt,
struct transfers * ptr,
struct primordial * ppm,
struct spectra * psp
) {
/** Summary: */
/** - define local variables */
int index_md;
int index_ic1,index_ic2,index_ic1_ic2;
int index_l;
int index_ct;
int cl_integrand_num_columns;
double * cl_integrand; /* array with argument cl_integrand[index_k*cl_integrand_num_columns+1+psp->index_ct] */
double * transfer_ic1; /* array with argument transfer_ic1[index_tt] */
double * transfer_ic2; /* idem */
double * primordial_pk; /* array with argument primordial_pk[index_ic_ic]*/
/* This code can be optionally compiled with the openmp option for parallel computation.
Inside parallel regions, the use of the command "return" is forbidden.
For error management, instead of "return _FAILURE_", we will set the variable below
to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
parallel region. */
int abort;
#ifdef _OPENMP
/* instrumentation times */
double tstart, tstop;
#endif
/** - allocate pointers to arrays where results will be stored */
class_alloc(psp->l_size,sizeof(int)*psp->md_size,psp->error_message);
class_alloc(psp->cl,sizeof(double *)*psp->md_size,psp->error_message);
class_alloc(psp->ddcl,sizeof(double *)*psp->md_size,psp->error_message);
psp->l_size_max = ptr->l_size_max;
class_alloc(psp->l,sizeof(double)*psp->l_size_max,psp->error_message);
/** - store values of l */
for (index_l=0; index_l < psp->l_size_max; index_l++) {
psp->l[index_l] = (double)ptr->l[index_l];
}
/** - loop over modes (scalar, tensors, etc). For each mode: */
for (index_md = 0; index_md < psp->md_size; index_md++) {
/** - --> (a) store number of l values for this mode */
psp->l_size[index_md] = ptr->l_size[index_md];
/** - --> (b) allocate arrays where results will be stored */
class_alloc(psp->cl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message);
class_alloc(psp->ddcl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message);
cl_integrand_num_columns = 1+psp->ct_size*2; /* one for k, ct_size for each type, ct_size for each second derivative of each type */
/** - --> (c) loop over initial conditions */
/* the pair (index_ic1,index_ic2) runs over the upper triangle of the
ic correlation matrix; index_ic1_ic2 is its flattened index */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
/* non-diagonal coefficients should be computed only if non-zero correlation */
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
/* initialize error management flag */
abort = _FALSE_;
/* beginning of parallel region */
#pragma omp parallel \
shared(ptr,ppm,index_md,psp,ppt,cl_integrand_num_columns,index_ic1,index_ic2,abort) \
private(tstart,cl_integrand,primordial_pk,transfer_ic1,transfer_ic2,index_l,tstop)
{
#ifdef _OPENMP
tstart = omp_get_wtime();
#endif
/* per-thread workspaces; the *_parallel macros set `abort` instead of
returning on failure, as required inside an omp parallel region */
class_alloc_parallel(cl_integrand,
ptr->q_size*cl_integrand_num_columns*sizeof(double),
psp->error_message);
class_alloc_parallel(primordial_pk,
psp->ic_ic_size[index_md]*sizeof(double),
psp->error_message);
class_alloc_parallel(transfer_ic1,
ptr->tt_size[index_md]*sizeof(double),
psp->error_message);
class_alloc_parallel(transfer_ic2,
ptr->tt_size[index_md]*sizeof(double),
psp->error_message);
#pragma omp for schedule (dynamic)
/** - ---> loop over l values defined in the transfer module.
For each l, compute the \f$ C_l\f$'s for all types (TT, TE, ...)
by convolving primordial spectra with transfer functions.
This elementary task is assigned to spectra_compute_cl() */
for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) {
#pragma omp flush(abort)
class_call_parallel(spectra_compute_cl(pba,
ppt,
ptr,
ppm,
psp,
index_md,
index_ic1,
index_ic2,
index_l,
cl_integrand_num_columns,
cl_integrand,
primordial_pk,
transfer_ic1,
transfer_ic2),
psp->error_message,
psp->error_message);
} /* end of loop over l */
#ifdef _OPENMP
tstop = omp_get_wtime();
if (psp->spectra_verbose > 1)
printf("In %s: time spent in parallel region (loop over l's) = %e s for thread %d\n",
__func__,tstop-tstart,omp_get_thread_num());
#endif
/* free per-thread workspaces before leaving the parallel region */
free(cl_integrand);
free(primordial_pk);
free(transfer_ic1);
free(transfer_ic2);
} /* end of parallel region */
if (abort == _TRUE_) return _FAILURE_;
}
else {
/* set non-diagonal coefficients to zero if pair of ic's uncorrelated */
for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) {
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
psp->cl[index_md]
[(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct]
= 0.;
}
}
}
}
}
/** - --> (d) now that for a given mode, all possible \f$ C_l\f$'s have been computed,
compute second derivative of the array in which they are stored,
in view of spline interpolation. */
class_call(array_spline_table_lines(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
psp->ddcl[index_md],
_SPLINE_EST_DERIV_,
psp->error_message),
psp->error_message,
psp->error_message);
}
return _SUCCESS_;
}
/**
* This routine computes the \f$ C_l\f$'s for a given mode, pair of initial conditions
* and multipole, but for all types (TT, TE...), by convolving the
* transfer functions with the primordial spectra.
*
* @param pba Input: pointer to background structure
* @param ppt Input: pointer to perturbation structure
* @param ptr Input: pointer to transfers structure
* @param ppm Input: pointer to primordial structure
* @param psp Input/Output: pointer to spectra structure (result stored here)
* @param index_md Input: index of mode under consideration
* @param index_ic1 Input: index of first initial condition in the correlator
* @param index_ic2 Input: index of second initial condition in the correlator
* @param index_l Input: index of multipole under consideration
* @param cl_integrand_num_columns Input: number of columns in cl_integrand
* @param cl_integrand Input: an allocated workspace
* @param primordial_pk Input: table of primordial spectrum values
* @param transfer_ic1 Input: table of transfer function values for first initial condition
* @param transfer_ic2 Input: table of transfer function values for second initial condition
* @return the error status
*/
int spectra_compute_cl(
struct background * pba,
struct perturbs * ppt,
struct transfers * ptr,
struct primordial * ppm,
struct spectra * psp,
int index_md,
int index_ic1,
int index_ic2,
int index_l,
int cl_integrand_num_columns,
double * cl_integrand,
double * primordial_pk,
double * transfer_ic1,
double * transfer_ic2
) {
int index_q;
int index_tt;
int index_ct;
int index_d1,index_d2;
double k;
double clvalue;
int index_ic1_ic2;
/* combined temperature transfer functions (sum over t0,t1,t2 terms) */
double transfer_ic1_temp=0.;
double transfer_ic2_temp=0.;
/* combined number-count transfer functions, one entry per redshift bin */
double * transfer_ic1_nc=NULL;
double * transfer_ic2_nc=NULL;
double factor;
int index_q_spline=0;
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (ppt->has_cl_number_count == _TRUE_) {
class_alloc(transfer_ic1_nc,psp->d_size*sizeof(double),psp->error_message);
class_alloc(transfer_ic2_nc,psp->d_size*sizeof(double),psp->error_message);
}
/* fill the integrand table: one row per wavenumber q */
for (index_q=0; index_q < ptr->q_size; index_q++) {
//q = ptr->q[index_q];
k = ptr->k[index_md][index_q];
/* column 0 holds the integration variable k */
cl_integrand[index_q*cl_integrand_num_columns+0] = k;
class_call(primordial_spectrum_at_k(ppm,index_md,linear,k,primordial_pk),
ppm->error_message,
psp->error_message);
/* above routine checks that k>0: no possible division by zero below */
/* read the transfer functions for both initial conditions at this (l,q) */
for (index_tt=0; index_tt < ptr->tt_size[index_md]; index_tt++) {
transfer_ic1[index_tt] =
ptr->transfer[index_md]
[((index_ic1 * ptr->tt_size[index_md] + index_tt)
* ptr->l_size[index_md] + index_l)
* ptr->q_size + index_q];
if (index_ic1 == index_ic2) {
transfer_ic2[index_tt] = transfer_ic1[index_tt];
}
else {
transfer_ic2[index_tt] = ptr->transfer[index_md]
[((index_ic2 * ptr->tt_size[index_md] + index_tt)
* ptr->l_size[index_md] + index_l)
* ptr->q_size + index_q];
}
}
/* define combinations of transfer functions */
if (ppt->has_cl_cmb_temperature == _TRUE_) {
if (_scalars_) {
transfer_ic1_temp = transfer_ic1[ptr->index_tt_t0] + transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2];
transfer_ic2_temp = transfer_ic2[ptr->index_tt_t0] + transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2];
}
if (_vectors_) {
transfer_ic1_temp = transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2];
transfer_ic2_temp = transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2];
}
if (_tensors_) {
transfer_ic1_temp = transfer_ic1[ptr->index_tt_t2];
transfer_ic2_temp = transfer_ic2[ptr->index_tt_t2];
}
}
/* number-count transfer: sum of the contributions enabled in the
perturbation module (density, redshift-space distortions, lensing
magnification, relativistic/GR terms) for each redshift bin */
if (ppt->has_cl_number_count == _TRUE_) {
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
transfer_ic1_nc[index_d1] = 0.;
transfer_ic2_nc[index_d1] = 0.;
if (ppt->has_nc_density == _TRUE_) {
transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_density+index_d1];
transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_density+index_d1];
}
if (ppt->has_nc_rsd == _TRUE_) {
transfer_ic1_nc[index_d1]
+= transfer_ic1[ptr->index_tt_rsd+index_d1]
+ transfer_ic1[ptr->index_tt_d0+index_d1]
+ transfer_ic1[ptr->index_tt_d1+index_d1];
transfer_ic2_nc[index_d1]
+= transfer_ic2[ptr->index_tt_rsd+index_d1]
+ transfer_ic2[ptr->index_tt_d0+index_d1]
+ transfer_ic2[ptr->index_tt_d1+index_d1];
}
if (ppt->has_nc_lens == _TRUE_) {
/* lensing magnification term carries an l(l+1) prefactor */
transfer_ic1_nc[index_d1] +=
psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic1[ptr->index_tt_nc_lens+index_d1];
transfer_ic2_nc[index_d1] +=
psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic2[ptr->index_tt_nc_lens+index_d1];
}
if (ppt->has_nc_gr == _TRUE_) {
transfer_ic1_nc[index_d1]
+= transfer_ic1[ptr->index_tt_nc_g1+index_d1]
+ transfer_ic1[ptr->index_tt_nc_g2+index_d1]
+ transfer_ic1[ptr->index_tt_nc_g3+index_d1]
+ transfer_ic1[ptr->index_tt_nc_g4+index_d1]
+ transfer_ic1[ptr->index_tt_nc_g5+index_d1];
transfer_ic2_nc[index_d1]
+= transfer_ic2[ptr->index_tt_nc_g1+index_d1]
+ transfer_ic2[ptr->index_tt_nc_g2+index_d1]
+ transfer_ic2[ptr->index_tt_nc_g3+index_d1]
+ transfer_ic2[ptr->index_tt_nc_g4+index_d1]
+ transfer_ic2[ptr->index_tt_nc_g5+index_d1];
}
}
}
/* integrand of Cl's */
/* note: we must integrate
C_l = int [4 pi dk/k calP(k) Delta1_l(q) Delta2_l(q)]
where calP(k) is the dimensionless
power spectrum equal to a constant in the scale-invariant case,
and to P(k) = A_s k^(ns-1) otherwise and q=sqrt(k2+K) (scalars)
or sqrt(k2+2K) (vectors) or sqrt(k2+3K) (tensors)
In the literature, people often rewrite the integral in terms
of q and absorb the Jacobian of the change of variables in a redefinition of the primordial
spectrum. Let us illustrate this for scalars:
dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-K)] = q2dq * 1/[q(q2-K)]
This factor 1/[q(q2-K)] is commonly absorbed in the definition of calP. Then one would have
C_l = int [4 pi q2 dq {A_s k^(ns-1)/[q(q2-K)]} Delta1_l(q) Delta2_l(q)]
Sometimes in the literature, the factor (k2-3K)=(q2-4K) present
in the initial conditions of scalar transfer functions (if
normalized to curvature R=1) is also absorbed in the definition
of the power spectrum. Then the curvature power spectrum reads
calP = (q2-4K)/[q(q2-K)] * (k/k)^ns
In CLASS we prefer to define calP = (k/k)^ns like in the flat
case, to have the factor (q2-4K) in the initial conditions,
and the factor 1/[q(q2-K)] doesn't need to be there since we
integrate over dk/k.
For tensors, the change of variable described above gives a slightly different result:
dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-3K)] = q2dq * 1/[q(q2-3K)]
But for tensors there are extra curvature-related correction factors to
take into account. See the comments in the perturbation module,
related to initial conditions for tensors.
*/
factor = 4. * _PI_ / k;
if (psp->has_tt == _TRUE_)
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tt]=
primordial_pk[index_ic1_ic2]
* transfer_ic1_temp
* transfer_ic2_temp
* factor;
if (psp->has_ee == _TRUE_)
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ee]=
primordial_pk[index_ic1_ic2]
* transfer_ic1[ptr->index_tt_e]
* transfer_ic2[ptr->index_tt_e]
* factor;
/* cross spectra are symmetrized over the two initial conditions */
if (psp->has_te == _TRUE_)
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_te]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_e] +
transfer_ic1[ptr->index_tt_e] * transfer_ic2_temp)
* factor;
if (_tensors_ && (psp->has_bb == _TRUE_))
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_bb]=
primordial_pk[index_ic1_ic2]
* transfer_ic1[ptr->index_tt_b]
* transfer_ic2[ptr->index_tt_b]
* factor;
if (_scalars_ && (psp->has_pp == _TRUE_))
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pp]=
primordial_pk[index_ic1_ic2]
* transfer_ic1[ptr->index_tt_lcmb]
* transfer_ic2[ptr->index_tt_lcmb]
* factor;
if (_scalars_ && (psp->has_tp == _TRUE_))
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tp]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lcmb] +
transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_temp)
* factor;
if (_scalars_ && (psp->has_ep == _TRUE_))
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ep]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_lcmb] +
transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_e])
* factor;
/* dd runs over bin pairs (d1,d2) with d1 <= d2 <= d1+non_diag */
if (_scalars_ && (psp->has_dd == _TRUE_)) {
index_ct=0;
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dd+index_ct]=
primordial_pk[index_ic1_ic2]
* transfer_ic1_nc[index_d1]
* transfer_ic2_nc[index_d2]
* factor;
index_ct++;
}
}
}
if (_scalars_ && (psp->has_td == _TRUE_)) {
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_td+index_d1]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_temp * transfer_ic2_nc[index_d1] +
transfer_ic1_nc[index_d1] * transfer_ic2_temp)
* factor;
}
}
if (_scalars_ && (psp->has_pd == _TRUE_)) {
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pd+index_d1]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_nc[index_d1] +
transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lcmb])
* factor;
}
}
if (_scalars_ && (psp->has_ll == _TRUE_)) {
index_ct=0;
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ll+index_ct]=
primordial_pk[index_ic1_ic2]
* transfer_ic1[ptr->index_tt_lensing+index_d1]
* transfer_ic2[ptr->index_tt_lensing+index_d2]
* factor;
index_ct++;
}
}
}
if (_scalars_ && (psp->has_tl == _TRUE_)) {
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tl+index_d1]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lensing+index_d1] +
transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2_temp)
* factor;
}
}
/* dl is not symmetric: d2 runs on both sides of the diagonal */
if (_scalars_ && (psp->has_dl == _TRUE_)) {
index_ct=0;
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
for (index_d2=MAX(index_d1-psp->non_diag,0); index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dl+index_ct]=
primordial_pk[index_ic1_ic2]
* transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lensing+index_d2]
* factor;
index_ct++;
}
}
}
}
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
/* treat null spectra (C_l^BB of scalars, C_l^pp of tensors, etc. */
if ((_scalars_ && (psp->has_bb == _TRUE_) && (index_ct == psp->index_ct_bb)) ||
(_tensors_ && (psp->has_pp == _TRUE_) && (index_ct == psp->index_ct_pp)) ||
(_tensors_ && (psp->has_tp == _TRUE_) && (index_ct == psp->index_ct_tp)) ||
(_tensors_ && (psp->has_ep == _TRUE_) && (index_ct == psp->index_ct_ep)) ||
(_tensors_ && (psp->has_dd == _TRUE_) && (index_ct == psp->index_ct_dd)) ||
(_tensors_ && (psp->has_td == _TRUE_) && (index_ct == psp->index_ct_td)) ||
(_tensors_ && (psp->has_pd == _TRUE_) && (index_ct == psp->index_ct_pd)) ||
(_tensors_ && (psp->has_ll == _TRUE_) && (index_ct == psp->index_ct_ll)) ||
(_tensors_ && (psp->has_tl == _TRUE_) && (index_ct == psp->index_ct_tl)) ||
(_tensors_ && (psp->has_dl == _TRUE_) && (index_ct == psp->index_ct_dl))
) {
psp->cl[index_md]
[(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.;
}
/* for non-zero spectra, integrate over q */
else {
/* spline the integrand over the whole range of k's */
class_call(array_spline(cl_integrand,
cl_integrand_num_columns,
ptr->q_size,
0,
1+index_ct,
1+psp->ct_size+index_ct,
_SPLINE_EST_DERIV_,
psp->error_message),
psp->error_message,
psp->error_message);
/* Technical point: we will now do a spline integral over the
whole range of k's, excepted in the closed (K>0) case. In
that case, it is a bad idea to spline over the values of k
corresponding to nu<nu_flat_approximation. In this region, nu
values are integer values, so the steps dq and dk have some
discrete jumps. This makes the spline routine less accurate
than a trapezoidal integral with finer sampling. So, in the
closed case, we set index_q_spline to
ptr->index_q_flat_approximation, to tell the integration
routine that below this index, it should treat the integral
as a trapezoidal one. For testing, one is free to set
index_q_spline to 0, to enforce spline integration
everywhere, or to (ptr->q_size-1), to enforce trapezoidal
integration everywhere. */
if (pba->sgnK == 1) {
index_q_spline = ptr->index_q_flat_approximation;
}
class_call(array_integrate_all_trapzd_or_spline(cl_integrand,
cl_integrand_num_columns,
ptr->q_size,
index_q_spline,
0,
1+index_ct,
1+psp->ct_size+index_ct,
&clvalue,
psp->error_message),
psp->error_message,
psp->error_message);
/* in the closed case, instead of an integral, we have a
discrete sum. In practice, this does not matter: the previous
routine does give a correct approximation of the discrete
sum, both in the trapezoidal and spline regions. The only
error comes from the first point: the previous routine
assumes a weight for the first point which is too small
compared to what it would be in an actual discrete
sum. The line below corrects this problem in an exact way.
*/
if (pba->sgnK == 1) {
clvalue += cl_integrand[1+index_ct] * ptr->q[0]/ptr->k[0][0]*sqrt(pba->K)/2.;
}
/* we have the correct C_l now. We can store it in the transfer structure. */
psp->cl[index_md]
[(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct]
= clvalue;
}
}
if (ppt->has_cl_number_count == _TRUE_) {
free(transfer_ic1_nc);
free(transfer_ic2_nc);
}
return _SUCCESS_;
}
/**
* This routine computes the values of k and tau at which the matter
* power spectra \f$ P(k,\tau)\f$ and the matter transfer functions \f$ T_i(k,\tau)\f$
* will be stored.
*
* @param pba Input: pointer to background structure (for z to tau conversion)
* @param ppt Input: pointer to perturbation structure (contain source functions)
* @param psp Input/Output: pointer to spectra structure
* @return the error status
*/
int spectra_k_and_tau(
                      struct background * pba,
                      struct perturbs * ppt,
                      struct spectra * psp
                      ) {

  /** Summary: */

  /** - define local variables */

  int index_k;
  int index_tau;
  double tau_min;

  /** - check the presence of scalar modes */

  class_test((ppt->has_scalars == _FALSE_),
             psp->error_message,
             "you cannot ask for matter power spectrum since you turned off scalar modes");

  /** - check the maximum redshift z_max_pk at which \f$P(k,z)\f$ and \f$ T_i(k,z)\f$ should be
      computable by interpolation. If it is equal to zero, only \f$ P(k,z=0)\f$
      needs to be computed. If it is higher, we will store in a table
      various P(k,tau) at several values of tau generously encompassing
      the range 0<z<z_max_pk */

  /* if z_max_pk<0, return error */
  class_test((psp->z_max_pk < 0),
             psp->error_message,
             "asked for negative redshift z=%e",psp->z_max_pk);

  /* if z_max_pk=0, there is just one value to store */
  if (psp->z_max_pk == 0.) {
    psp->ln_tau_size=1;
  }

  /* if z_max_pk>0, store several values (with a comfortable margin above z_max_pk) in view of interpolation */
  else{

    /* find the first relevant value of tau (last value in the table
       tau_sampling before tau(z_max)) and infer the number of values
       of tau at which P(k) must be stored */

    class_call(background_tau_of_z(pba,psp->z_max_pk,&tau_min),
               pba->error_message,
               psp->error_message);

    index_tau=0;
    class_test((tau_min <= ppt->tau_sampling[index_tau]),
               psp->error_message,
               "you asked for zmax=%e, i.e. taumin=%e, smaller than or equal to the first possible value =%e; it should be strictly bigger for a successful interpolation",psp->z_max_pk,tau_min,ppt->tau_sampling[0]);

    /* advance to the first sampled tau >= tau_min, then step back one
       so that tau_min is bracketed from below */
    while (ppt->tau_sampling[index_tau] < tau_min){
      index_tau++;
    }
    index_tau --;
    class_test(index_tau<0,
               psp->error_message,
               "by construction, this should never happen, a bug must have been introduced somewhere");

    /* whenever possible, take a few more values in to avoid boundary effects in the interpolation */
    if (index_tau>0) index_tau--;
    if (index_tau>0) index_tau--;
    if (index_tau>0) index_tau--;
    if (index_tau>0) index_tau--;

    psp->ln_tau_size=ppt->tau_size-index_tau;
  }

  /** - allocate and fill table of tau values at which \f$P(k,\tau)\f$ and \f$T_i(k,\tau)\f$ are stored */

  class_alloc(psp->ln_tau,sizeof(double)*psp->ln_tau_size,psp->error_message);

  /* store the LAST ln_tau_size values of ppt->tau_sampling, in log */
  for (index_tau=0; index_tau<psp->ln_tau_size; index_tau++) {
    psp->ln_tau[index_tau]=log(ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size]);
  }

  /** - allocate and fill table of k values at which \f$ P(k,\tau)\f$ is stored */

  psp->ln_k_size = ppt->k_size[ppt->index_md_scalars];
  class_alloc(psp->ln_k,sizeof(double)*psp->ln_k_size,psp->error_message);

  for (index_k=0; index_k<psp->ln_k_size; index_k++) {
    /* k must be strictly positive before taking the log */
    class_test(ppt->k[ppt->index_md_scalars][index_k] <= 0.,
               psp->error_message,
               "stop to avoid segmentation fault");
    psp->ln_k[index_k]=log(ppt->k[ppt->index_md_scalars][index_k]);
  }

  return _SUCCESS_;
}
/**
* This routine computes a table of values for all matter power spectra P(k),
* given the source functions and primordial spectra.
*
* @param pba Input: pointer to background structure (will provide H, Omega_m at redshift of interest)
* @param ppt Input: pointer to perturbation structure (contain source functions)
* @param ppm Input: pointer to primordial structure
* @param pnl Input: pointer to nonlinear structure
* @param psp Input/Output: pointer to spectra structure
* @return the error status
*/
int spectra_pk(
               struct background * pba,
               struct perturbs * ppt,
               struct primordial * ppm,
               struct nonlinear *pnl,
               struct spectra * psp
               ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_ic1,index_ic2,index_ic1_ic2;
  int index_k;
  int index_tau;
  double * primordial_pk; /* array with argument primordial_pk[index_ic_ic] */
  double source_ic1;
  double source_ic2;
  double ln_pk_tot; /* accumulator over initial-condition pairs at fixed (tau,k) */

  /** - check the presence of scalar modes */

  class_test((ppt->has_scalars == _FALSE_),
             psp->error_message,
             "you cannot ask for matter power spectrum since you turned off scalar modes");

  index_md = psp->index_md_scalars;

  /** - allocate temporary vectors where the primordial spectrum and the background quantities will be stored */

  class_alloc(primordial_pk,psp->ic_ic_size[index_md]*sizeof(double),psp->error_message);

  /** - allocate and fill array of \f$P(k,\tau)\f$ values */

  /* table layout: ln_pk[(index_tau * ln_k_size + index_k) * ic_ic_size + index_ic1_ic2] */
  class_alloc(psp->ln_pk,
              sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],
              psp->error_message);

  if (pnl->method != nl_none) {
    /* non-linear table has no initial-condition dimension: one total P_NL per (tau,k) */
    class_alloc(psp->ln_pk_nl,
                sizeof(double)*psp->ln_tau_size*psp->ln_k_size,
                psp->error_message);
  }
  else {
    /* NULL marks the absence of a non-linear table for later consumers */
    psp->ln_pk_nl = NULL;
  }

  for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) {
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      /* primordial spectra (log) for all ic pairs at this k */
      class_call(primordial_spectrum_at_k(ppm,index_md,logarithmic,psp->ln_k[index_k],primordial_pk),
                 ppm->error_message,
                 psp->error_message);

      ln_pk_tot =0;

      /* curvature primordial spectrum:
         P_R(k) = 1/(2pi^2) k^3 <R R>
         so, primordial curvature correlator:
         <R R> = (2pi^2) k^-3 P_R(k)
         so, delta_m correlator:
         P(k) = <delta_m delta_m> = (2pi^2) k^-3 (source_m)^2 P_R(k)

         For isocurvature or cross adiabatic-isocurvature parts,
         replace one or two 'R' by 'S_i's */

      /* part diagonal in initial conditions */
      for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {

        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);

        /* (index_tau-psp->ln_tau_size+ppt->tau_size) maps the stored
           late-time tau slice back onto the full perturbation-module
           tau sampling */
        source_ic1 = ppt->sources[index_md]
          [index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
          [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

        psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] =
          log(2.*_PI_*_PI_/exp(3.*psp->ln_k[index_k])
              *source_ic1*source_ic1
              *exp(primordial_pk[index_ic1_ic2]));

        /* NOTE(review): this sums logarithms of the diagonal terms; for a
           single initial condition it is just ln P(k), but for several
           ic's it is not the log of the summed spectrum -- confirm the
           intended semantics of ln_pk_tot in the non-linear rescaling below */
        ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2];

      }

      /* part non-diagonal in initial conditions */
      for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
        for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {

          index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);

          if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

            source_ic1 = ppt->sources[index_md]
              [index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            source_ic2 = ppt->sources[index_md]
              [index_ic2 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            /* cross terms are stored as the (log) cross-correlation ratio
               times the product of signs, not as a logarithm of a spectrum */
            psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] =
              primordial_pk[index_ic1_ic2]*SIGN(source_ic1)*SIGN(source_ic2);

            ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2];

          }
          else {
            /* vanishing cross-correlation: store zero explicitly */
            psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.;
          }
        }
      }

      /* if non-linear corrections required, compute the total non-linear matter power spectrum */

      if (pnl->method != nl_none) {
        psp->ln_pk_nl[index_tau * psp->ln_k_size + index_k] =
          ln_pk_tot
          + 2.*log(pnl->nl_corr_density[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]);
      }

    }
  }

  /**- if interpolation of \f$P(k,\tau)\f$ will be needed (as a function of tau),
     compute array of second derivatives in view of spline interpolation */

  if (psp->ln_tau_size > 1) {

    class_alloc(psp->ddln_pk,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message);

    class_call(array_spline_table_lines(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->ln_pk,
                                        psp->ic_ic_size[index_md]*psp->ln_k_size,
                                        psp->ddln_pk,
                                        _SPLINE_EST_DERIV_,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);

  }

  /* compute sigma8 (mean variance today in sphere of radius 8/h Mpc */

  class_call(spectra_sigma(pba,ppm,psp,8./pba->h,0.,&(psp->sigma8)),
             psp->error_message,
             psp->error_message);

  if (psp->spectra_verbose>0)
    fprintf(stdout," -> sigma8=%g (computed till k = %g h/Mpc)\n",
            psp->sigma8,
            exp(psp->ln_k[psp->ln_k_size-1])/pba->h);

  /**- if interpolation of \f$ P_{NL}(k,\tau)\f$ will be needed (as a function of tau),
     compute array of second derivatives in view of spline interpolation */

  if (pnl->method != nl_none) {
    if (psp->ln_tau_size > 1) {

      /* NOTE(review): this allocation includes a factor ic_ic_size although
         the spline below (and ln_pk_nl itself) only uses ln_k_size columns;
         looks oversized but harmless -- verify against upstream CLASS */
      class_alloc(psp->ddln_pk_nl,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message);

      class_call(array_spline_table_lines(psp->ln_tau,
                                          psp->ln_tau_size,
                                          psp->ln_pk_nl,
                                          psp->ln_k_size,
                                          psp->ddln_pk_nl,
                                          _SPLINE_EST_DERIV_,
                                          psp->error_message),
                 psp->error_message,
                 psp->error_message);

    }
  }

  free (primordial_pk);

  return _SUCCESS_;
}
/**
* This routine computes sigma(R) given P(k) (does not check that k_max is large
* enough)
*
* @param pba Input: pointer to background structure
* @param ppm Input: pointer to primordial structure
* @param psp Input: pointer to spectra structure
 * @param R     Input: radius in Mpc
 * @param z     Input: redshift
 * @param sigma Output: variance in a sphere of radius R (dimensionless)
 * @return the error status
 */
int spectra_sigma(
                  struct background * pba,
                  struct primordial * ppm,
                  struct spectra * psp,
                  double R,
                  double z,
                  double * sigma
                  ) {

  double pk;
  double * pk_ic = NULL; /* per-ic spectra; only needed when several ic's exist */
  double * array_for_sigma;
  int index_num;
  int index_k;
  int index_y;
  int index_ddy;
  int i;
  double k,W,x;

  if (psp->ic_ic_size[psp->index_md_scalars]>1)
    class_alloc(pk_ic,
                psp->ic_ic_size[psp->index_md_scalars]*sizeof(double),
                psp->error_message);

  /* assign consecutive column indices of the integration table:
     [k, integrand y(k), spline second derivative ddy] */
  i=0;
  index_k=i;
  i++;
  index_y=i;
  i++;
  index_ddy=i;
  i++;
  index_num=i;

  class_alloc(array_for_sigma,
              psp->ln_k_size*index_num*sizeof(double),
              psp->error_message);

  for (i=0;i<psp->ln_k_size;i++) {
    k=exp(psp->ln_k[i]);
    if (i == (psp->ln_k_size-1)) k *= 0.9999999; // to prevent rounding error leading to k being bigger than maximum value
    x=k*R;
    /* W(x): Fourier transform of a spherical top-hat window of radius R */
    W=3./x/x/x*(sin(x)-x*cos(x));
    class_call(spectra_pk_at_k_and_z(pba,ppm,psp,k,z,&pk,pk_ic),
               psp->error_message,
               psp->error_message);
    /* integrand of sigma^2(R) = 1/(2 pi^2) int dk k^2 P(k) W^2(kR),
       the 1/(2 pi^2) prefactor being applied after integration below */
    array_for_sigma[i*index_num+index_k]=k;
    array_for_sigma[i*index_num+index_y]=k*k*pk*W*W;
  }

  class_call(array_spline(array_for_sigma,
                          index_num,
                          psp->ln_k_size,
                          index_k,
                          index_y,
                          index_ddy,
                          _SPLINE_EST_DERIV_,
                          psp->error_message),
             psp->error_message,
             psp->error_message);

  class_call(array_integrate_all_spline(array_for_sigma,
                                        index_num,
                                        psp->ln_k_size,
                                        index_k,
                                        index_y,
                                        index_ddy,
                                        sigma, /* receives the integral; rescaled just below */
                                        psp->error_message),
             psp->error_message,
             psp->error_message);

  free(array_for_sigma);

  if (psp->ic_ic_size[psp->index_md_scalars]>1)
    free(pk_ic);

  /* convert the integral into sigma(R) itself */
  *sigma = sqrt(*sigma/(2.*_PI_*_PI_));

  return _SUCCESS_;
}
/**
 * This routine computes a table of values for all matter transfer
 * functions \f$ T_i(k,\tau) \f$, given the source functions.
*
* @param pba Input: pointer to background structure (will provide density of each species)
* @param ppt Input: pointer to perturbation structure (contain source functions)
* @param psp Input/Output: pointer to spectra structure
* @return the error status
*/
int spectra_matter_transfers(
                             struct background * pba,
                             struct perturbs * ppt,
                             struct spectra * psp
                             ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_ic;
  int index_k;
  int index_tau;
  int last_index_back;
  double * pvecback_sp_long; /* array with argument pvecback_sp_long[pba->index_bg] */
  double delta_i,theta_i,rho_i;
  double delta_rho_tot,rho_tot;               /* accumulators for the total density transfer */
  double rho_plus_p_theta_tot,rho_plus_p_tot; /* accumulators for the total velocity transfer */
  int n_ncdm;

  /** - check the presence of scalar modes */

  class_test((ppt->has_scalars == _FALSE_),
             psp->error_message,
             "you cannot ask for matter power spectrum since you turned off scalar modes");

  index_md = psp->index_md_scalars;

  /** - allocate and fill array of \f$ T_i(k,\tau)\f$ values */

  /* table layout:
     matter_transfer[((index_tau*ln_k_size + index_k)*ic_size + index_ic)*tr_size + index_tr] */
  class_alloc(psp->matter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message);

  /** - allocate temporary vectors where the background quantities will be stored */

  class_alloc(pvecback_sp_long,pba->bg_size*sizeof(double),psp->error_message);

  for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) {

    /* (index_tau-psp->ln_tau_size+ppt->tau_size) maps the stored
       late-time tau slice back onto the full perturbation tau sampling;
       the same index shift is used for all source lookups below */
    class_call(background_at_tau(pba,
                                 ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size],
                                 /* for this last argument we could have passed
                                    exp(psp->ln_tau[index_tau]) but we would then loose
                                    precision in the exp(log(x)) operation) */
                                 pba->long_info,
                                 pba->inter_normal,
                                 &last_index_back,
                                 pvecback_sp_long),
               pba->error_message,
               psp->error_message);

    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) {

        delta_rho_tot=0.;
        rho_tot=0.;
        rho_plus_p_theta_tot=0.;
        rho_plus_p_tot=0.;

        /* T_g(k,tau) */

        rho_i = pvecback_sp_long[pba->index_bg_rho_g];

        /* NOTE(review): for photons, rho_tot/rho_plus_p_tot are only
           incremented when the corresponding source was requested (the
           increments sit inside the if), while for all later species they
           are incremented unconditionally -- presumably deliberate, but
           worth confirming against upstream */
        if (ppt->has_source_delta_g == _TRUE_) {

          delta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_g]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_g] = delta_i;

          delta_rho_tot += rho_i * delta_i;

          rho_tot += rho_i;

        }

        if (ppt->has_source_theta_g == _TRUE_) {

          theta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_g]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_g] = theta_i;

          /* photons: w = 1/3, so rho+p = 4/3 rho */
          rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;

          rho_plus_p_tot += 4./3. * rho_i;

        }

        /* T_b(k,tau) */

        rho_i = pvecback_sp_long[pba->index_bg_rho_b];

        if (ppt->has_source_delta_b == _TRUE_) {

          delta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_b]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_b] = delta_i;

          delta_rho_tot += rho_i * delta_i;

        }

        rho_tot += rho_i;

        if (ppt->has_source_theta_b == _TRUE_) {

          theta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_b]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_b] = theta_i;

          /* baryons: pressureless, rho+p = rho */
          rho_plus_p_theta_tot += rho_i * theta_i;

        }

        rho_plus_p_tot += rho_i;

        /* T_cdm(k,tau) */

        if (pba->has_cdm == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_cdm];

          if (ppt->has_source_delta_cdm == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_cdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_cdm] = delta_i;

            delta_rho_tot += rho_i * delta_i;

          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_cdm == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_cdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_cdm] = theta_i;

            rho_plus_p_theta_tot += rho_i * theta_i;

          }

          rho_plus_p_tot += rho_i;

        }

        /* T_dcdm(k,tau) */

        if (pba->has_dcdm == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_dcdm];

          if (ppt->has_source_delta_dcdm == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dcdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dcdm] = delta_i;

            delta_rho_tot += rho_i * delta_i;

          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_dcdm == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dcdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dcdm] = theta_i;

            rho_plus_p_theta_tot += rho_i * theta_i;

          }

          rho_plus_p_tot += rho_i;

        }

        /* T_scf(k,tau) */

        if (pba->has_scf == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_scf];

          if (ppt->has_source_delta_scf == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_scf]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_scf] = delta_i;

            delta_rho_tot += rho_i * delta_i;

          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_scf == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_scf]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_scf] = theta_i;

            /* scalar field: pressure taken from the background vector */
            rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]) * theta_i;

          }

          rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]);

        }

        /* T_fld(k,tau) */

        if (pba->has_fld == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_fld];

          if (ppt->has_source_delta_fld == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_fld]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_fld] = delta_i;

            delta_rho_tot += rho_i * delta_i;

          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_fld == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_fld]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_fld] = theta_i;

            /* fluid: (1+w(a)) rho with CLP equation of state w(a) = w0 + wa (1-a/a0) */
            rho_plus_p_theta_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i * theta_i;

          }

          rho_plus_p_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i;

        }

        /* T_ur(k,tau) */

        if (pba->has_ur == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_ur];

          if (ppt->has_source_delta_ur == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ur]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ur] = delta_i;

            delta_rho_tot += rho_i * delta_i;

          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_ur == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ur]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ur] = theta_i;

            /* ultra-relativistic species: w = 1/3 */
            rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;

          }

          rho_plus_p_tot += 4./3. * rho_i;

        }

        /* T_dr(k,tau) */

        if (pba->has_dr == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_dr];

          if (ppt->has_source_delta_dr == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dr]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dr] = delta_i;

            delta_rho_tot += rho_i * delta_i;

          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_dr == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dr]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dr] = theta_i;

            /* decay radiation: relativistic, w = 1/3 */
            rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;

          }

          rho_plus_p_tot += 4./3. * rho_i;

        }

        /* T_ncdm_i(k,tau) */

        if (pba->has_ncdm == _TRUE_) {

          for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {

            rho_i = pvecback_sp_long[pba->index_bg_rho_ncdm1+n_ncdm];

            if (ppt->has_source_delta_ncdm == _TRUE_) {

              delta_i = ppt->sources[index_md]
                [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ncdm1+n_ncdm]
                [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

              psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ncdm1+n_ncdm] = delta_i;

              delta_rho_tot += rho_i * delta_i;

            }

            rho_tot += rho_i;

            if (ppt->has_source_theta_ncdm == _TRUE_) {

              theta_i = ppt->sources[index_md]
                [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ncdm1+n_ncdm]
                [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

              psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ncdm1+n_ncdm] = theta_i;

              /* massive neutrinos: time-dependent pressure from the background vector */
              rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]) * theta_i;

            }

            rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]);

          }
        }

        if (ppt->has_source_phi == _TRUE_) {

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_phi] = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_phi]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

        }

        if (ppt->has_source_psi == _TRUE_) {

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_psi] = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_psi]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

        }

        /* could include homogeneous component in rho_tot if uncommented (leave commented to match CMBFAST/CAMB definition) */

        /* if (pba->has_lambda == _TRUE_) { */

        /*   rho_i = pvecback_sp_long[pba->index_bg_rho_lambda]; */

        /*   rho_tot += rho_i; */

        /* } */

        /* T_tot(k,tau) */

        if (ppt->has_density_transfers == _TRUE_) {

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_tot] = delta_rho_tot/rho_tot;

        }

        if (ppt->has_velocity_transfers == _TRUE_) {

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_tot] = rho_plus_p_theta_tot/rho_plus_p_tot;

        }

      }
    }
  }

  /**- if interpolation of \f$ P(k,\tau)\f$ will be needed (as a function of tau),
     compute array of second derivatives in view of spline interpolation */

  if (psp->ln_tau_size > 1) {

    class_alloc(psp->ddmatter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message);

    class_call(array_spline_table_lines(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->matter_transfer,
                                        psp->ic_size[index_md]*psp->ln_k_size*psp->tr_size,
                                        psp->ddmatter_transfer,
                                        _SPLINE_EST_DERIV_,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);

  }

  free (pvecback_sp_long);

  return _SUCCESS_;
}
/* Build the column-title string for the transfer-function output file.
 *
 * pba            Input: background structure (species flags, N_ncdm)
 * ppt            Input: perturbation structure (which transfers exist, gauge)
 * output_format  Input: class_format or camb_format column conventions
 * titles         Output: '_'-separated list filled via class_store_columntitle
 * returns the error status
 *
 * Fix: the scalar-field velocity column was emitted as "t__scf" (double
 * underscore), inconsistent with "d_scf" and every other "t_*" title;
 * corrected to "t_scf".
 */
int spectra_output_tk_titles(struct background *pba,
                             struct perturbs *ppt,
                             enum file_format output_format,
                             char titles[_MAXTITLESTRINGLENGTH_]
                             ){

  int n_ncdm;
  char tmp[40];

  if (output_format == class_format) {
    class_store_columntitle(titles,"k (h/Mpc)",_TRUE_);
    if (ppt->has_density_transfers == _TRUE_) {
      class_store_columntitle(titles,"d_g",_TRUE_);
      class_store_columntitle(titles,"d_b",_TRUE_);
      class_store_columntitle(titles,"d_cdm",pba->has_cdm);
      class_store_columntitle(titles,"d_fld",pba->has_fld);
      class_store_columntitle(titles,"d_ur",pba->has_ur);
      if (pba->has_ncdm == _TRUE_) {
        /* one column per non-cold dark matter species */
        for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
          sprintf(tmp,"d_ncdm[%d]",n_ncdm);
          class_store_columntitle(titles,tmp,_TRUE_);
        }
      }
      class_store_columntitle(titles,"d_dcdm",pba->has_dcdm);
      class_store_columntitle(titles,"d_dr",pba->has_dr);
      class_store_columntitle(titles,"d_scf",pba->has_scf);
      class_store_columntitle(titles,"d_tot",_TRUE_);
      class_store_columntitle(titles,"phi",ppt->has_source_phi);
      class_store_columntitle(titles,"psi",ppt->has_source_psi);
    }
    if (ppt->has_velocity_transfers == _TRUE_) {
      class_store_columntitle(titles,"t_g",_TRUE_);
      class_store_columntitle(titles,"t_b",_TRUE_);
      /* cdm velocity vanishes by definition in synchronous gauge */
      class_store_columntitle(titles,"t_cdm",((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous)));
      class_store_columntitle(titles,"t_fld",pba->has_fld);
      class_store_columntitle(titles,"t_ur",pba->has_ur);
      if (pba->has_ncdm == _TRUE_) {
        for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
          sprintf(tmp,"t_ncdm[%d]",n_ncdm);
          class_store_columntitle(titles,tmp,_TRUE_);
        }
      }
      class_store_columntitle(titles,"t_dcdm",pba->has_dcdm);
      class_store_columntitle(titles,"t_dr",pba->has_dr);
      class_store_columntitle(titles,"t_scf",pba->has_scf);
      class_store_columntitle(titles,"t_tot",_TRUE_);
    }
  }

  else if (output_format == camb_format) {
    /* fixed six-column CMBFAST/CAMB layout */
    class_store_columntitle(titles,"k (h/Mpc)",_TRUE_);
    class_store_columntitle(titles,"-T_cdm/k2",_TRUE_);
    class_store_columntitle(titles,"-T_b/k2",_TRUE_);
    class_store_columntitle(titles,"-T_g/k2",_TRUE_);
    class_store_columntitle(titles,"-T_ur/k2",_TRUE_);
    class_store_columntitle(titles,"-T_ncdm/k2",_TRUE_);
    class_store_columntitle(titles,"-T_tot/k2",_TRUE_);
  }

  return _SUCCESS_;
}
int spectra_output_tk_data(
                           struct background * pba,
                           struct perturbs * ppt,
                           struct spectra * psp,
                           enum file_format output_format,
                           double z,
                           int number_of_titles,
                           double *data
                           ) {

  int n_ncdm;
  double k, k_over_h, k2;
  double * tkfull=NULL;  /* array with argument
                            pk_ic[(index_k * psp->ic_size[index_md] + index_ic)*psp->tr_size+index_tr] */
  double *tk;      /* points at the tr_size transfers of the current (k, ic) */
  double *dataptr; /* points at the output row of the current (ic, k) */

  int index_md=0;
  int index_ic;
  int index_k;
  int index_tr;
  int storeidx;

  /* only allocate when there is at least one value to store */
  if (psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size > 0){
    class_alloc(tkfull,
                psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size*sizeof(double),
                psp->error_message);
  }

  /** - compute \f$T_i(k)\f$ for each k (if several ic's, compute it for each ic; if z_pk = 0, this is done by directly reading inside the pre-computed table; if not, this is done by interpolating the table at the correct value of tau. */

  /* if z_pk = 0, no interpolation needed */
  /* exact floating-point comparison is intentional: z is a user-supplied
     parameter, and z == 0 selects the last stored tau slice directly */
  if (z == 0.) {
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {
      for (index_tr=0; index_tr<psp->tr_size; index_tr++) {
        for (index_ic=0; index_ic<psp->ic_size[index_md]; index_ic++) {
          /* last tau slice (ln_tau_size-1) corresponds to today */
          tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr] = psp->matter_transfer[(((psp->ln_tau_size-1)*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr];
        }
      }
    }
  }

  /* if 0 <= z_pk <= z_max_pk, interpolation needed, */
  else {
    class_call(spectra_tk_at_z(pba,
                               psp,
                               z,
                               tkfull),
               psp->error_message,
               psp->error_message);
  }

  /** - store data */

  for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) {
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      storeidx = 0;
      dataptr = data+index_ic*(psp->ln_k_size*number_of_titles)+index_k*number_of_titles;
      tk = &(tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size]);
      k = exp(psp->ln_k[index_k]);
      k2 = k*k;
      k_over_h = k/pba->h;

      class_store_double(dataptr, k_over_h, _TRUE_,storeidx);

      /* indices for species associated with a velocity transfer function in Fourier space */

      /* column order here must mirror spectra_output_tk_titles exactly */
      if (output_format == class_format) {

        if (ppt->has_density_transfers == _TRUE_) {

          class_store_double(dataptr,tk[psp->index_tr_delta_g],ppt->has_source_delta_g,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_b],ppt->has_source_delta_b,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_cdm],ppt->has_source_delta_cdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_fld],ppt->has_source_delta_fld,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_ur],ppt->has_source_delta_ur,storeidx);
          if (pba->has_ncdm == _TRUE_){
            for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
              class_store_double(dataptr,tk[psp->index_tr_delta_ncdm1+n_ncdm],ppt->has_source_delta_ncdm,storeidx);
            }
          }
          class_store_double(dataptr,tk[psp->index_tr_delta_dcdm],ppt->has_source_delta_dcdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_dr],ppt->has_source_delta_dr,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_scf],ppt->has_source_delta_scf,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_tot],_TRUE_,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_phi],ppt->has_source_phi,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_psi],ppt->has_source_psi,storeidx);

        }
        if (ppt->has_velocity_transfers == _TRUE_) {

          class_store_double(dataptr,tk[psp->index_tr_theta_g],ppt->has_source_theta_g,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_b],ppt->has_source_theta_b,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_cdm],ppt->has_source_theta_cdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_fld],ppt->has_source_theta_fld,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_ur],ppt->has_source_theta_ur,storeidx);
          if (pba->has_ncdm == _TRUE_){
            for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
              class_store_double(dataptr,tk[psp->index_tr_theta_ncdm1+n_ncdm],ppt->has_source_theta_ncdm,storeidx);
            }
          }
          class_store_double(dataptr,tk[psp->index_tr_theta_dcdm],ppt->has_source_theta_dcdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_dr],ppt->has_source_theta_dr,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_scf],ppt->has_source_theta_scf,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_tot],_TRUE_,storeidx);

        }

      }
      else if (output_format == camb_format) {

        /* rescale and reorder the matter transfer functions following the CMBFAST/CAMB convention */
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_cdm]/k2,ppt->has_source_delta_cdm,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_b]/k2,ppt->has_source_delta_b,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_g]/k2,ppt->has_source_delta_g,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ur]/k2,ppt->has_source_delta_ur,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ncdm1]/k2,ppt->has_source_delta_ncdm,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_tot]/k2,_TRUE_,storeidx,0.0);

      }
    }
  }

  //Necessary because the size could be zero (if psp->tr_size is zero)
  if (tkfull != NULL)
    free(tkfull);

  return _SUCCESS_;
}
/* Fill the human-readable header line and the filename suffix describing
   the initial-condition mode identified by index_ic. Both outputs are
   left empty if index_ic matches no active mode. */
int spectra_firstline_and_ic_suffix(struct perturbs *ppt,
                                    int index_ic,
                                    char first_line[_LINE_LENGTH_MAX_],
                                    FileName ic_suffix){

  first_line[0]='\0';
  ic_suffix[0]='\0';

  /* deliberately independent if's (not else-if): each active mode flag is
     tested against index_ic; in practice exactly one can match */
  if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) {
    strcpy(ic_suffix,"ad");
    strcpy(first_line,"for adiabatic (AD) mode (normalized to initial curvature=1) ");
  }

  if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) {
    strcpy(ic_suffix,"bi");
    strcpy(first_line,"for baryon isocurvature (BI) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) {
    strcpy(ic_suffix,"cdi");
    strcpy(first_line,"for CDM isocurvature (CDI) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) {
    strcpy(ic_suffix,"nid");
    strcpy(first_line,"for neutrino density isocurvature (NID) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) {
    strcpy(ic_suffix,"niv");
    strcpy(first_line,"for neutrino velocity isocurvature (NIV) mode (normalized to initial entropy=1)");
  }

  return _SUCCESS_;
}
|
double_reduction.c | #include <stdio.h>
#include <omp.h>
int main()
{
    /* Each thread stores its own id in the reduction variable; the
       max reduction keeps the largest, so the program prints the
       highest thread number in the team. */
    double max_thread_id = 0.0;
#pragma omp parallel reduction(max:max_thread_id)
    {
        max_thread_id = omp_get_thread_num();
    }
    printf("Result: %f\n", max_thread_id);
    return 0;
}
|
test.c | #include <stdlib.h>
#include <stdint.h>
#include <check.h>
#include <omp.h>
/* Sequential recursive Fibonacci; deliberately naive so the task-based
   variant has real work to parallelize below the cutoff depth. */
static uint64_t fib_seq(int n)
{ /*{{{*/
    if (n >= 2)
        return fib_seq(n - 1) + fib_seq(n - 2);
    return n;
} /*}}}*/
/* Task-parallel Fibonacci: spawn child tasks while the recursion depth is
   below cutoff, then fall back to the plain sequential version. */
static uint64_t fib(int n, int depth, int cutoff)
{ /*{{{*/
    uint64_t left, right;

    if (n < 2)
        return n;

    if (depth >= cutoff)
    {
        /* deep enough: no more task-creation overhead */
        left = fib_seq(n - 1);
        right = fib_seq(n - 2);
    }
    else
    {
#pragma omp task shared(left) firstprivate(n, depth)
        left = fib(n - 1, depth + 1, cutoff);
#pragma omp task shared(right) firstprivate(n, depth)
        right = fib(n - 2, depth + 1, cutoff);
#pragma omp taskwait
    }
    return left + right;
} /*}}}*/
START_TEST(fib_omp)
{/*{{{*/
    int result;
    int n = 42;
    int cutoff = 12;

    /* One thread (single) spawns the root task; 'result' is declared in
       the enclosing scope and therefore shared by default inside the
       task, and the taskwait guarantees it is written before the
       parallel region ends and the assertion runs. */
#pragma omp parallel
    {
#pragma omp single
        {
#pragma omp task firstprivate(n, cutoff)
            {
                result = fib(n, 0, cutoff);
            }
#pragma omp taskwait
        }
    }

    /* fib(42) == 267914296 */
    ck_assert_int_eq(result, 267914296);
}/*}}}*/
END_TEST
/* Assemble the "Test" suite containing the single fib_omp test case. */
Suite* test_suite(void)
{/*{{{*/
    Suite* suite = suite_create("Test");
    TCase* tcase = tcase_create("fib_omp");

    tcase_add_test(tcase, fib_omp);
    suite_add_tcase(suite, tcase);

    return suite;
}/*}}}*/
/* Run the suite verbosely and map the failure count to the exit status. */
int main(void)
{/*{{{*/
    Suite* suite = test_suite();
    SRunner* runner = srunner_create(suite);

    srunner_run_all(runner, CK_VERBOSE);
    int failed = srunner_ntests_failed(runner);
    srunner_free(runner);

    return failed == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}/*}}}*/
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% John Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/morphology.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveBlurImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: adaptively blur all default channels.
  */
  return(AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
MagickExport Image *AdaptiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) <= MagickEpsilon ? 1.0 : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);  /* zero sigma: identity blur, return the clone */
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The (leveled) edge strength later selects the per-pixel kernel size.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) LevelImage(edge_image,"20%,95%");
  gaussian_image=GaussianBlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) LevelImage(edge_image,"10%,95%");
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum; only even
    slots kernel[0], kernel[2], ... are populated, each of size
    (width-i) x (width-i).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) AcquireQuantumMemory((size_t) (width-i),(width-i)*
      sizeof(**kernel));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    if (fabs(normalize) <= MagickEpsilon)
      normalize=1.0;
    normalize=1.0/normalize;
    /*
      Normalize all (width-i)*(width-i) taps; the previous loop bound (j*j)
      only scaled about a quarter of the kernel, leaving it under-normalized.
    */
    for (k=0; k < (ssize_t) ((width-i)*(width-i)); k++)
      kernel[i][k]=normalize*kernel[i][k];
  }
  if (i < (ssize_t) width)
    {
      /* allocation failed mid-way: release kernels allocated so far */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishMagickMemory(kernel[i]);
      kernel=(double **) RelinquishMagickMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image: stronger edge response selects a smaller kernel
    (less blur), weaker response a larger one.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  edge_view=AcquireCacheView(edge_image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p,
      *restrict r;

    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha,
        gamma;

      register const double
        *restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /* map edge intensity to a kernel slot; clamp and round down to even */
      i=(ssize_t) ceil((double) width*QuantumScale*PixelIntensity(r)-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      pixel=bias;
      k=kernel[i];
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetAlphaPixelComponent(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetRedPixelComponent(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetGreenPixelComponent(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetBluePixelComponent(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetOpacityPixelComponent(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*indexes[x+(width-i)*v+u];
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
      if ((channel & RedChannel) != 0)
        q->red=ClampToQuantum(gamma*GetRedPixelComponent(&pixel));
      if ((channel & GreenChannel) != 0)
        q->green=ClampToQuantum(gamma*GetGreenPixelComponent(&pixel));
      if ((channel & BlueChannel) != 0)
        q->blue=ClampToQuantum(gamma*GetBluePixelComponent(&pixel));
      if ((channel & OpacityChannel) != 0)
        SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        blur_indexes[x]=ClampToQuantum(gamma*GetIndexPixelComponent(&pixel));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AdaptiveBlurImageChannel)
#endif
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishMagickMemory(kernel[i]);
  kernel=(double **) RelinquishMagickMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveSharpenImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: adaptively sharpen all default channels.
  */
  return(AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
MagickExport Image *AdaptiveSharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) <= MagickEpsilon ? 1.0 : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  if (fabs(sigma) <= MagickEpsilon)
    return(sharp_image);  /* zero sigma: identity, return the clone */
  if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sharp_image->exception);
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The (leveled) edge strength later selects the per-pixel kernel size.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) LevelImage(edge_image,"20%,95%");
  gaussian_image=GaussianBlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) LevelImage(edge_image,"10%,95%");
  /*
    Create a set of (negated Gaussian) sharpening kernels from maximum
    (radius,sigma) to minimum; only even slots kernel[0], kernel[2], ... are
    populated, each of size (width-i) x (width-i).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) AcquireQuantumMemory((size_t) (width-i),(width-i)*
      sizeof(**kernel));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    if (fabs(normalize) <= MagickEpsilon)
      normalize=1.0;
    normalize=1.0/normalize;
    /*
      Normalize all (width-i)*(width-i) taps; the previous loop bound (j*j)
      only scaled about a quarter of the kernel, leaving it under-normalized.
    */
    for (k=0; k < (ssize_t) ((width-i)*(width-i)); k++)
      kernel[i][k]=normalize*kernel[i][k];
  }
  if (i < (ssize_t) width)
    {
      /* allocation failed mid-way: release kernels allocated so far */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishMagickMemory(kernel[i]);
      kernel=(double **) RelinquishMagickMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image: stronger edge response selects a larger kernel
    (more sharpening near edges), weaker response a smaller one.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  edge_view=AcquireCacheView(edge_image);
  sharp_view=AcquireCacheView(sharp_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p,
      *restrict r;

    register IndexPacket
      *restrict sharp_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view);
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha,
        gamma;

      register const double
        *restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /*
        Map inverted edge intensity to a kernel slot.  The intensity must be
        normalized with 1.0, not QuantumRange: the original expression
        (QuantumRange-QuantumScale*PixelIntensity(r)) always exceeded width,
        so i clamped to width and the kernel selection never adapted.
      */
      i=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        PixelIntensity(r))-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      k=kernel[i];
      pixel=bias;
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetAlphaPixelComponent(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetRedPixelComponent(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetGreenPixelComponent(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetBluePixelComponent(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetOpacityPixelComponent(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*indexes[x+(width-i)*v+u];
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
      if ((channel & RedChannel) != 0)
        q->red=ClampToQuantum(gamma*GetRedPixelComponent(&pixel));
      if ((channel & GreenChannel) != 0)
        q->green=ClampToQuantum(gamma*GetGreenPixelComponent(&pixel));
      if ((channel & BlueChannel) != 0)
        q->blue=ClampToQuantum(gamma*GetBluePixelComponent(&pixel));
      if ((channel & OpacityChannel) != 0)
        SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        sharp_indexes[x]=ClampToQuantum(gamma*GetIndexPixelComponent(&pixel));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AdaptiveSharpenImageChannel)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishMagickMemory(kernel[i]);
  kernel=(double **) RelinquishMagickMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% BlurImage() differs from GaussianBlurImage() in that it uses a separable
% kernel which is faster but mathematically equivalent to the non-separable
% kernel.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *BlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: blur all default channels.
  */
  return(BlurImageChannel(image,DefaultChannels,radius,sigma,exception));
}
/*
  GetBlurKernel() generates a normalized 1-D Gaussian convolution kernel of
  `width' taps for the given standard deviation `sigma'.  Returns NULL on
  allocation failure; the caller owns the returned buffer and must release it
  with RelinquishMagickMemory().  Relies on the MagickSigma macro (defined
  above) to guard against a zero sigma.
*/
static double *GetBlurKernel(const size_t width,const double sigma)
{
  double
    *kernel,
    normalize;

  register ssize_t
    i;

  ssize_t
    j,
    k;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(double *) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double *) NULL)
    return((double *) NULL);  /* was return(0): use an explicit null pointer */
  normalize=0.0;
  j=(ssize_t) width/2;
  i=0;
  for (k=(-j); k <= j; k++)
  {
    kernel[i]=(double) (exp(-((double) k*k)/(2.0*MagickSigma*MagickSigma))/
      (MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
    i++;
  }
  /* scale so the taps sum to 1.0 (Gaussian taps are strictly positive) */
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}
MagickExport Image *BlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view;

  double
    *kernel;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    x,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);  /* zero sigma: identity blur, return the clone */
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetBlurKernel(width,sigma);
  if (kernel == (double *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  BlurImage with %.20g kernel:",(double) width);
      message=AcquireString("");
      k=kernel;
      for (i=0; i < (ssize_t) width; i++)
      {
        *message='\0';
        (void) FormatMagickString(format,MaxTextExtent,"%.20g: ",(double) i);
        (void) ConcatenateString(&message,format);
        (void) FormatMagickString(format,MaxTextExtent,"%g ",*k++);
        (void) ConcatenateString(&message,format);
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Blur rows: first pass of the separable convolution.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y,
      image->columns+width,1,exception);
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict kernel_pixels;

      register ssize_t
        i;

      pixel=bias;
      k=kernel;
      kernel_pixels=p;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /* opaque path: no alpha weighting required */
          for (i=0; i < (ssize_t) width; i++)
          {
            pixel.red+=(*k)*kernel_pixels->red;
            pixel.green+=(*k)*kernel_pixels->green;
            pixel.blue+=(*k)*kernel_pixels->blue;
            k++;
            kernel_pixels++;
          }
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel;
              kernel_pixels=p;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.opacity+=(*k)*kernel_pixels->opacity;
                k++;
                kernel_pixels++;
              }
              SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel;
              kernel_indexes=indexes;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.index+=(*k)*(*kernel_indexes);
                k++;
                kernel_indexes++;
              }
              blur_indexes[x]=ClampToQuantum(pixel.index);
            }
        }
      else
        {
          /* matte path: alpha-weighted accumulation with renormalization */
          MagickRealType
            alpha,
            gamma;

          gamma=0.0;
          for (i=0; i < (ssize_t) width; i++)
          {
            alpha=(MagickRealType) (QuantumScale*
              GetAlphaPixelComponent(kernel_pixels));
            pixel.red+=(*k)*alpha*kernel_pixels->red;
            pixel.green+=(*k)*alpha*kernel_pixels->green;
            pixel.blue+=(*k)*alpha*kernel_pixels->blue;
            gamma+=(*k)*alpha;
            k++;
            kernel_pixels++;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            q->red=ClampToQuantum(gamma*GetRedPixelComponent(&pixel));
          if ((channel & GreenChannel) != 0)
            q->green=ClampToQuantum(gamma*GetGreenPixelComponent(&pixel));
          if ((channel & BlueChannel) != 0)
            q->blue=ClampToQuantum(gamma*GetBluePixelComponent(&pixel));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel;
              kernel_pixels=p;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.opacity+=(*k)*kernel_pixels->opacity;
                k++;
                kernel_pixels++;
              }
              SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (i=0; i < (ssize_t) width; i++)
              {
                alpha=(MagickRealType) (QuantumScale*
                  GetAlphaPixelComponent(kernel_pixels));
                pixel.index+=(*k)*alpha*(*kernel_indexes);
                k++;
                kernel_pixels++;
                kernel_indexes++;
              }
              blur_indexes[x]=ClampToQuantum(gamma*
                GetIndexPixelComponent(&pixel));
            }
        }
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_BlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,blur_image->rows+
          blur_image->columns);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  /*
    Blur columns: second pass, reading the row-blurred result in place.
  */
  image_view=AcquireCacheView(blur_image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (x=0; x < (ssize_t) blur_image->columns; x++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      y;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,x,-((ssize_t) width/2L),1,
      image->rows+width,exception);
    q=GetCacheViewAuthenticPixels(blur_view,x,0,1,blur_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (y=0; y < (ssize_t) blur_image->rows; y++)
    {
      MagickPixelPacket
        pixel;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict kernel_pixels;

      register ssize_t
        i;

      pixel=bias;
      k=kernel;
      kernel_pixels=p;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          for (i=0; i < (ssize_t) width; i++)
          {
            pixel.red+=(*k)*kernel_pixels->red;
            pixel.green+=(*k)*kernel_pixels->green;
            pixel.blue+=(*k)*kernel_pixels->blue;
            k++;
            kernel_pixels++;
          }
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel;
              kernel_pixels=p;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.opacity+=(*k)*kernel_pixels->opacity;
                k++;
                kernel_pixels++;
              }
              SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel;
              kernel_indexes=indexes;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.index+=(*k)*(*kernel_indexes);
                k++;
                kernel_indexes++;
              }
              blur_indexes[y]=ClampToQuantum(pixel.index);
            }
        }
      else
        {
          MagickRealType
            alpha,
            gamma;

          gamma=0.0;
          for (i=0; i < (ssize_t) width; i++)
          {
            alpha=(MagickRealType) (QuantumScale*
              GetAlphaPixelComponent(kernel_pixels));
            pixel.red+=(*k)*alpha*kernel_pixels->red;
            pixel.green+=(*k)*alpha*kernel_pixels->green;
            pixel.blue+=(*k)*alpha*kernel_pixels->blue;
            gamma+=(*k)*alpha;
            k++;
            kernel_pixels++;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            q->red=ClampToQuantum(gamma*GetRedPixelComponent(&pixel));
          if ((channel & GreenChannel) != 0)
            q->green=ClampToQuantum(gamma*GetGreenPixelComponent(&pixel));
          if ((channel & BlueChannel) != 0)
            q->blue=ClampToQuantum(gamma*GetBluePixelComponent(&pixel));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel;
              kernel_pixels=p;
              for (i=0; i < (ssize_t) width; i++)
              {
                pixel.opacity+=(*k)*kernel_pixels->opacity;
                k++;
                kernel_pixels++;
              }
              SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (i=0; i < (ssize_t) width; i++)
              {
                alpha=(MagickRealType) (QuantumScale*
                  GetAlphaPixelComponent(kernel_pixels));
                pixel.index+=(*k)*alpha*(*kernel_indexes);
                k++;
                kernel_pixels++;
                kernel_indexes++;
              }
              blur_indexes[y]=ClampToQuantum(gamma*
                GetIndexPixelComponent(&pixel));
            }
        }
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_BlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,blur_image->rows+
          blur_image->columns);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishMagickMemory(kernel);
  /*
    Set the type before the failure check: the original assigned
    blur_image->type after DestroyImage() had already returned NULL on
    failure, dereferencing a null pointer.
  */
  blur_image->type=image->type;
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const size_t order,
% const double *kernel,ExceptionInfo *exception)
% Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
% const size_t order,const double *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o order: the number of columns and rows in the filter kernel.
%
% o kernel: An array of double representing the convolution kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,const size_t order,
  const double *kernel,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: convolve all default channels with the given kernel.
  */
  return(ConvolveImageChannel(image,DefaultChannels,order,kernel,exception));
}
MagickExport Image *ConvolveImageChannel(const Image *image,
const ChannelType channel,const size_t order,const double *kernel,
ExceptionInfo *exception)
{
#define ConvolveImageTag "Convolve/Image"
CacheView
*convolve_view,
*image_view;
double
*normal_kernel;
Image
*convolve_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
MagickRealType
gamma;
register ssize_t
i;
size_t
width;
ssize_t
y;
/*
Initialize convolve image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
width=order;
if ((width % 2) == 0)
ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
convolve_image=CloneImage(image,0,0,MagickTrue,exception);
if (convolve_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(convolve_image,DirectClass) == MagickFalse)
{
InheritException(exception,&convolve_image->exception);
convolve_image=DestroyImage(convolve_image);
return((Image *) NULL);
}
if (image->debug != MagickFalse)
{
char
format[MaxTextExtent],
*message;
register const double
*k;
ssize_t
u,
v;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" ConvolveImage with %.20gx%.20g kernel:",(double) width,(double)
width);
message=AcquireString("");
k=kernel;
for (v=0; v < (ssize_t) width; v++)
{
*message='\0';
(void) FormatMagickString(format,MaxTextExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < (ssize_t) width; u++)
{
(void) FormatMagickString(format,MaxTextExtent,"%g ",*k++);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
/*
Normalize kernel.
*/
normal_kernel=(double *) AcquireQuantumMemory(width*width,
sizeof(*normal_kernel));
if (normal_kernel == (double *) NULL)
{
convolve_image=DestroyImage(convolve_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
gamma=0.0;
for (i=0; i < (ssize_t) (width*width); i++)
gamma+=kernel[i];
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
for (i=0; i < (ssize_t) (width*width); i++)
normal_kernel[i]=gamma*kernel[i];
/*
Convolve image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
SetMagickPixelPacketBias(image,&bias);
image_view=AcquireCacheView(image);
convolve_view=AcquireCacheView(convolve_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict convolve_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(width/2L),image->columns+width,width,exception);
q=GetCacheViewAuthenticPixels(convolve_view,0,y,convolve_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
convolve_indexes=GetCacheViewAuthenticIndexQueue(convolve_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickPixelPacket
pixel;
register const double
*restrict k;
register const PixelPacket
*restrict kernel_pixels;
register ssize_t
u;
ssize_t
v;
pixel=bias;
k=normal_kernel;
kernel_pixels=p;
if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
{
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
pixel.red+=(*k)*kernel_pixels[u].red;
pixel.green+=(*k)*kernel_pixels[u].green;
pixel.blue+=(*k)*kernel_pixels[u].blue;
k++;
}
kernel_pixels+=image->columns+width;
}
if ((channel & RedChannel) != 0)
SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
if ((channel & GreenChannel) != 0)
SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
if ((channel & BlueChannel) != 0)
SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
if ((channel & OpacityChannel) != 0)
{
k=normal_kernel;
kernel_pixels=p;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
pixel.opacity+=(*k)*kernel_pixels[u].opacity;
k++;
}
kernel_pixels+=image->columns+width;
}
SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
register const IndexPacket
*restrict kernel_indexes;
k=normal_kernel;
kernel_indexes=indexes;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
pixel.index+=(*k)*kernel_indexes[u];
k++;
}
kernel_indexes+=image->columns+width;
}
convolve_indexes[x]=ClampToQuantum(pixel.index);
}
}
else
{
MagickRealType
alpha,
gamma;
gamma=0.0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
alpha=(MagickRealType) (QuantumScale*(QuantumRange-
kernel_pixels[u].opacity));
pixel.red+=(*k)*alpha*kernel_pixels[u].red;
pixel.green+=(*k)*alpha*kernel_pixels[u].green;
pixel.blue+=(*k)*alpha*kernel_pixels[u].blue;
gamma+=(*k)*alpha;
k++;
}
kernel_pixels+=image->columns+width;
}
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
if ((channel & RedChannel) != 0)
q->red=ClampToQuantum(gamma*GetRedPixelComponent(&pixel));
if ((channel & GreenChannel) != 0)
q->green=ClampToQuantum(gamma*GetGreenPixelComponent(&pixel));
if ((channel & BlueChannel) != 0)
q->blue=ClampToQuantum(gamma*GetBluePixelComponent(&pixel));
if ((channel & OpacityChannel) != 0)
{
k=normal_kernel;
kernel_pixels=p;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
pixel.opacity+=(*k)*kernel_pixels[u].opacity;
k++;
}
kernel_pixels+=image->columns+width;
}
SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
register const IndexPacket
*restrict kernel_indexes;
k=normal_kernel;
kernel_pixels=p;
kernel_indexes=indexes;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
alpha=(MagickRealType) (QuantumScale*(QuantumRange-
kernel_pixels[u].opacity));
pixel.index+=(*k)*alpha*kernel_indexes[u];
k++;
}
kernel_pixels+=image->columns+width;
kernel_indexes+=image->columns+width;
}
convolve_indexes[x]=ClampToQuantum(gamma*
GetIndexPixelComponent(&pixel));
}
}
indexes++;
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(convolve_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ConvolveImageChannel)
#endif
proceed=SetImageProgress(image,ConvolveImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
convolve_image->type=image->type;
convolve_view=DestroyCacheView(convolve_view);
image_view=DestroyCacheView(image_view);
normal_kernel=(double *) RelinquishMagickMemory(normal_kernel);
if (status == MagickFalse)
convolve_image=DestroyImage(convolve_image);
return(convolve_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image.
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Hull() performs one sweep of the despeckle hull operator over a single
  channel plane.  Both f and g are (columns+2) x (rows+2) buffers with a
  one-pixel zero border; (x_offset,y_offset) selects the neighbor direction
  compared against, and polarity selects growth (>0) or shrink (<=0).
  The first pass reads f and writes g; the second pass reads g (and its
  offset neighbors) and writes the result back into f.
*/
static void Hull(const ssize_t x_offset,const ssize_t y_offset,
  const size_t columns,const size_t rows,Quantum *f,Quantum *g,
  const int polarity)
{
  MagickRealType
    v;

  register Quantum
    *p,   /* source plane walker */
    *q,   /* destination plane walker */
    *r,   /* neighbor at (+x_offset,+y_offset) */
    *s;   /* neighbor at (-x_offset,-y_offset), second pass only */

  register ssize_t
    x;

  ssize_t
    y;

  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top border row; each row below also skips a border column. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
  for (y=0; y < (ssize_t) rows; y++)
  {
    p++;
    q++;
    r++;
    if (polarity > 0)
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*p);
        /* Grow toward a neighbor at least 2 (char-scaled) levels brighter. */
        if ((MagickRealType) *r >= (v+(MagickRealType) ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        *q=(Quantum) v;
        p++;
        q++;
        r++;
      }
    else
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*p);
        /* Shrink toward a neighbor at least 2 (char-scaled) levels darker. */
        if ((MagickRealType) *r <= (v-(MagickRealType) ScaleCharToQuantum(2)))
          v-=(ssize_t) ScaleCharToQuantum(1);
        *q=(Quantum) v;
        p++;
        q++;
        r++;
      }
    p++;
    q++;
    r++;
  }
  /*
    Second pass: compare against both the forward (r) and backward (s)
    neighbors of the intermediate result in g, writing back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
  for (y=0; y < (ssize_t) rows; y++)
  {
    p++;
    q++;
    r++;
    s++;
    if (polarity > 0)
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*q);
        if (((MagickRealType) *s >=
             (v+(MagickRealType) ScaleCharToQuantum(2))) &&
            ((MagickRealType) *r > v))
          v+=ScaleCharToQuantum(1);
        *p=(Quantum) v;
        p++;
        q++;
        r++;
        s++;
      }
    else
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*q);
        if (((MagickRealType) *s <=
             (v-(MagickRealType) ScaleCharToQuantum(2))) &&
            ((MagickRealType) *r < v))
          v-=(MagickRealType) ScaleCharToQuantum(1);
        *p=(Quantum) v;
        p++;
        q++;
        r++;
        s++;
      }
    p++;
    q++;
    r++;
    s++;
  }
}
/*
  DespeckleImage() reduces speckle noise while preserving edges.  Each
  channel is copied into a zero-bordered plane, run through four pairs of
  grow/shrink Hull() sweeps (one per direction in X[]/Y[]), and written
  back to the despeckled image.  Returns a new image, or NULL on failure
  (errors are reported through exception).
*/
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  Quantum
    *restrict buffers,
    *restrict pixels;

  size_t
    length,
    number_channels;

  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  despeckle_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&despeckle_image->exception);
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffers: one bordered plane for the working pixels and
    one scratch plane for Hull().
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixels=(Quantum *) AcquireQuantumMemory(length,2*sizeof(*pixels));
  buffers=(Quantum *) AcquireQuantumMemory(length,2*sizeof(*buffers));
  if ((pixels == (Quantum *) NULL) || (buffers == (Quantum *) NULL))
    {
      if (buffers != (Quantum *) NULL)
        buffers=(Quantum *) RelinquishMagickMemory(buffers);
      if (pixels != (Quantum *) NULL)
        pixels=(Quantum *) RelinquishMagickMemory(pixels);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
  image_view=AcquireCacheView(image);
  despeckle_view=AcquireCacheView(despeckle_image);
  for (i=0; i < (ssize_t) number_channels; i++)
  {
    register Quantum
      *buffer,
      *pixel;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    pixel=pixels;
    (void) ResetMagickMemory(pixel,0,length*sizeof(*pixel));
    buffer=buffers;
    /* j indexes the bordered plane; start past the top border row. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *restrict indexes;

      register const PixelPacket
        *restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: pixel[j]=GetRedPixelComponent(p); break;
          case 1: pixel[j]=GetGreenPixelComponent(p); break;
          case 2: pixel[j]=GetBluePixelComponent(p); break;
          case 3: pixel[j]=GetOpacityPixelComponent(p); break;
          case 4: pixel[j]=GetBlackPixelComponent(indexes,x); break;
          default: break;
        }
        p++;
        j++;
      }
      j++;
    }
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    for (k=0; k < 4; k++)
    {
      Hull(X[k],Y[k],image->columns,image->rows,pixel,buffer,1);
      Hull(-X[k],-Y[k],image->columns,image->rows,pixel,buffer,1);
      Hull(-X[k],-Y[k],image->columns,image->rows,pixel,buffer,-1);
      Hull(X[k],Y[k],image->columns,image->rows,pixel,buffer,-1);
    }
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register IndexPacket
        *restrict indexes;

      register PixelPacket
        *restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      /*
        Bug fix: fetch the writable index queue from the despeckle view;
        previously this fetched from the read-only source view, so CMYK
        black-channel results were written to the wrong queue.
      */
      indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: q->red=pixel[j]; break;
          case 1: q->green=pixel[j]; break;
          case 2: q->blue=pixel[j]; break;
          case 3: q->opacity=pixel[j]; break;
          case 4: indexes[x]=pixel[j]; break;
          default: break;
        }
        q++;
        j++;
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        {
          status=MagickFalse;
          break;
        }
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          number_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffers=(Quantum *) RelinquishMagickMemory(buffers);
  pixels=(Quantum *) RelinquishMagickMemory(pixels);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EdgeImage() finds edges by convolving with a Laplacian-style kernel:
  every tap is -1.0 except the center, which is width*width-1.0 so the
  kernel sums to zero.  A radius of 0 selects a suitable width.
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  double
    *kernel;

  Image
    *edge_image;

  size_t
    n,
    taps,
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  taps=width*width;
  for (n=0; n < taps; n++)
    kernel[n]=(-1.0);
  kernel[taps/2]=(double) (taps-1.0);  /* center tap: sum of kernel is 0 */
  edge_image=ConvolveImage(image,width,kernel,exception);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EmbossImage() produces a three-dimensional grayscale effect by convolving
  with a signed Gaussian whose non-zero taps lie on a single anti-diagonal,
  then equalizing the result.  A radius of 0 selects a suitable width.
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    *kernel;

  Image
    *emboss_image;

  size_t
    width;

  ssize_t
    diag,
    dx,
    dy,
    half,
    n;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  half=(ssize_t) width/2;
  diag=half;  /* column carrying the non-zero tap; walks down one per row */
  n=0;
  for (dy=(-half); dy <= half; dy++)
  {
    for (dx=(-half); dx <= half; dx++)
    {
      kernel[n]=0.0;
      if (dx == diag)
        kernel[n]=(double) (((dx < 0) || (dy < 0) ? -8.0 : 8.0)*
          exp(-((double) dx*dx+dy*dy)/(2.0*MagickSigma*MagickSigma))/
          (2.0*MagickPI*MagickSigma*MagickSigma));
      n++;
    }
    diag--;
  }
  emboss_image=ConvolveImage(image,width,kernel,exception);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FilterImage() applies a custom convolution kernel to the image.
%
% The format of the FilterImage method is:
%
% Image *FilterImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
% Image *FilterImageChannel(const Image *image,const ChannelType channel,
% const KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  FilterImage() applies a custom convolution kernel to all default channels;
  it is a thin wrapper around FilterImageChannel().
*/
MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  return(FilterImageChannel(image,DefaultChannels,kernel,exception));
}
/*
  FilterImageChannel() convolves the selected channels of the image with a
  custom (possibly non-square) kernel.  Returns a new image or NULL on
  failure; errors are reported through exception.

  Bug fix: the kernel loops previously iterated v over kernel->width and u
  over kernel->height.  v advances the pixel pointer one fetched row
  (image->columns+kernel->width) per step, so it must span kernel->height,
  while u indexes within a row and must span kernel->width.  The transposed
  bounds mis-sampled non-square kernels and read past the fetched region
  when width > height.
*/
MagickExport Image *FilterImageChannel(const Image *image,
  const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception)
{
#define FilterImageTag "Filter/Image"

  CacheView
    *filter_view,
    *image_view;

  Image
    *filter_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  ssize_t
    y;

  /*
    Initialize filter image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((kernel->width % 2) == 0)
    ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
  filter_image=CloneImage(image,0,0,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&filter_image->exception);
      filter_image=DestroyImage(filter_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double)
        kernel->height);
      message=AcquireString("");
      k=kernel->values;
      for (v=0; v < (ssize_t) kernel->height; v++)
      {
        *message='\0';
        (void) FormatMagickString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) kernel->width; u++)
        {
          (void) FormatMagickString(format,MaxTextExtent,"%g ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  status=AccelerateConvolveImage(image,kernel,filter_image,exception);
  if (status == MagickTrue)
    return(filter_image);
  /*
    Filter image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  filter_view=AcquireCacheView(filter_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict filter_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel->width/2L),
      y-(ssize_t) (kernel->height/2L),image->columns+kernel->width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict kernel_pixels;

      register ssize_t
        u;

      ssize_t
        v;

      pixel=bias;
      k=kernel->values;
      kernel_pixels=p;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /* v walks kernel rows (height), u walks kernel columns (width). */
          for (v=0; v < (ssize_t) kernel->height; v++)
          {
            for (u=0; u < (ssize_t) kernel->width; u++)
            {
              pixel.red+=(*k)*kernel_pixels[u].red;
              pixel.green+=(*k)*kernel_pixels[u].green;
              pixel.blue+=(*k)*kernel_pixels[u].blue;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel->values;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                for (u=0; u < (ssize_t) kernel->width; u++)
                {
                  pixel.opacity+=(*k)*kernel_pixels[u].opacity;
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel->values;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                for (u=0; u < (ssize_t) kernel->width; u++)
                {
                  pixel.index+=(*k)*kernel_indexes[u];
                  k++;
                }
                kernel_indexes+=image->columns+kernel->width;
              }
              filter_indexes[x]=ClampToQuantum(pixel.index);
            }
        }
      else
        {
          MagickRealType
            alpha,
            gamma;

          /* Alpha-weighted convolution: each tap is scaled by the source
             pixel's opacity, then renormalized by the accumulated weight. */
          gamma=0.0;
          for (v=0; v < (ssize_t) kernel->height; v++)
          {
            for (u=0; u < (ssize_t) kernel->width; u++)
            {
              alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                kernel_pixels[u].opacity));
              pixel.red+=(*k)*alpha*kernel_pixels[u].red;
              pixel.green+=(*k)*alpha*kernel_pixels[u].green;
              pixel.blue+=(*k)*alpha*kernel_pixels[u].blue;
              gamma+=(*k)*alpha;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            q->red=ClampToQuantum(gamma*GetRedPixelComponent(&pixel));
          if ((channel & GreenChannel) != 0)
            q->green=ClampToQuantum(gamma*GetGreenPixelComponent(&pixel));
          if ((channel & BlueChannel) != 0)
            q->blue=ClampToQuantum(gamma*GetBluePixelComponent(&pixel));
          if ((channel & OpacityChannel) != 0)
            {
              k=kernel->values;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                for (u=0; u < (ssize_t) kernel->width; u++)
                {
                  pixel.opacity+=(*k)*kernel_pixels[u].opacity;
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *restrict kernel_indexes;

              k=kernel->values;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                for (u=0; u < (ssize_t) kernel->width; u++)
                {
                  alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                    kernel_pixels[u].opacity));
                  pixel.index+=(*k)*alpha*kernel_indexes[u];
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
                kernel_indexes+=image->columns+kernel->width;
              }
              filter_indexes[x]=ClampToQuantum(gamma*
                GetIndexPixelComponent(&pixel));
            }
        }
      indexes++;
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(filter_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FilterImageChannel)
#endif
        proceed=SetImageProgress(image,FilterImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  filter_image->type=image->type;
  filter_view=DestroyCacheView(filter_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    filter_image=DestroyImage(filter_image);
  return(filter_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
% The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *GaussianBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GaussianBlurImage() blurs all default channels; it is a thin wrapper
  around GaussianBlurImageChannel().
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  return(GaussianBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
/*
  GaussianBlurImageChannel() blurs the selected channels by convolving with
  a 2-D Gaussian kernel of the given radius and sigma.  A radius of 0
  selects a suitable kernel width.
*/
MagickExport Image *GaussianBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    *kernel;

  Image
    *blur_image;

  size_t
    width;

  ssize_t
    center,
    dx,
    dy,
    n;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  center=(ssize_t) width/2;
  n=0;
  for (dy=(-center); dy <= center; dy++)
    for (dx=(-center); dx <= center; dx++)
      kernel[n++]=(double) (exp(-((double) dx*dx+dy*dy)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  blur_image=ConvolveImageChannel(image,channel,width,kernel,exception);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
% Image *MotionBlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetMotionBlurKernel() builds a normalized 1-D half-Gaussian convolution
  kernel of the requested width; the caller owns (and must relinquish) the
  returned memory.  Returns NULL on allocation failure.
*/
static double *GetMotionBlurKernel(const size_t width,const double sigma)
{
  double
    *kernel,
    sum;

  register ssize_t
    i;

  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(double *) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double *) NULL)
    return(kernel);
  sum=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    sum+=kernel[i];
  }
  /* Normalize so the taps sum to one. */
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=sum;
  return(kernel);
}
/*
  MotionBlurImage() applies motion blur to all default channels; it is a
  thin wrapper around MotionBlurImageChannel().
*/
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  return(MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle,
    exception));
}
/*
  MotionBlurImageChannel() simulates motion blur along the given angle by
  accumulating each output pixel from `width' virtual pixels sampled along
  the blur direction, weighted by a 1-D Gaussian kernel.  Returns a new
  image or NULL on failure; errors are reported through exception.

  Fix: added the exception-signature assertion for consistency with every
  other entry point in this module.
*/
MagickExport Image *MotionBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;

  double
    *kernel;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Precompute the integer sample offsets along the blur direction.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      PixelPacket
        pixel;

      register const IndexPacket
        *restrict indexes;

      register double
        *restrict k;

      register ssize_t
        i;

      k=kernel;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /* No alpha weighting: straight weighted accumulation. */
          for (i=0; i < (ssize_t) width; i++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
              offset[i].y,&pixel,exception);
            qixel.red+=(*k)*pixel.red;
            qixel.green+=(*k)*pixel.green;
            qixel.blue+=(*k)*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*(*indexes);
              }
            k++;
          }
          if ((channel & RedChannel) != 0)
            q->red=ClampToQuantum(qixel.red);
          if ((channel & GreenChannel) != 0)
            q->green=ClampToQuantum(qixel.green);
          if ((channel & BlueChannel) != 0)
            q->blue=ClampToQuantum(qixel.blue);
          if ((channel & OpacityChannel) != 0)
            q->opacity=ClampToQuantum(qixel.opacity);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            blur_indexes[x]=(IndexPacket) ClampToQuantum(qixel.index);
        }
      else
        {
          MagickRealType
            alpha,
            gamma;

          /* Alpha-weighted accumulation, renormalized by total weight. */
          alpha=0.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) width; i++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
              offset[i].y,&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*
              GetAlphaPixelComponent(&pixel));
            qixel.red+=(*k)*alpha*pixel.red;
            qixel.green+=(*k)*alpha*pixel.green;
            qixel.blue+=(*k)*alpha*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*alpha*(*indexes);
              }
            gamma+=(*k)*alpha;
            k++;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            q->red=ClampToQuantum(gamma*qixel.red);
          if ((channel & GreenChannel) != 0)
            q->green=ClampToQuantum(gamma*qixel.green);
          if ((channel & BlueChannel) != 0)
            q->blue=ClampToQuantum(gamma*qixel.blue);
          if ((channel & OpacityChannel) != 0)
            q->opacity=ClampToQuantum(qixel.opacity);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            blur_indexes[x]=(IndexPacket) ClampToQuantum(gamma*qixel.index);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_MotionBlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishMagickMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImage method is:
%
%   Image *PreviewImage(const Image *image,const PreviewType preview,
%     ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"

  char
    factor[MaxTextExtent],
    label[MaxTextExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Render NumberTiles thumbnails of `image', each processed with the
    requested `preview' operation at a progressively stronger setting, and
    return them tiled into a single montage.  The center tile is left
    unprocessed as a reference.  Returns NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);  /* first GammaPreview tile uses gamma 0.2 */
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel);
    if (i == (NumberTiles/2))
      {
        /*
          Center tile: the untouched reference image.
        */
        (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatMagickString(label,MaxTextExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatMagickString(label,MaxTextExtent,"shear %gx%g",
          degrees,2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatMagickString(label,MaxTextExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatMagickString(factor,MaxTextExtent,"100,100,%g",
          2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatMagickString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatMagickString(factor,MaxTextExtent,"100,%g",
          2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatMagickString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatMagickString(factor,MaxTextExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatMagickString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImageChannel(preview_image,DefaultChannels,gamma);
        (void) FormatMagickString(label,MaxTextExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue);
        (void) FormatMagickString(label,MaxTextExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse);
        (void) FormatMagickString(label,MaxTextExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatMagickString(label,MaxTextExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatMagickString(label,MaxTextExtent,"colors %.20g",(double)
          colors);
        break;
      }
      case DespecklePreview:
      {
        /*
          Despeckle i+1 times, feeding each result back as the input.
        */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatMagickString(label,MaxTextExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,
          (size_t) radius,exception);
        (void) FormatMagickString(label,MaxTextExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MaxTextExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MaxTextExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MaxTextExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MaxTextExtent);
            break;
          }
          case 4:
          {
            (void) CopyMagickString(factor,"laplacian",MaxTextExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"Poisson",MaxTextExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatMagickString(label,MaxTextExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatMagickString(label,MaxTextExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatMagickString(label,MaxTextExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          Threshold the clone that is actually tiled; thresholding
          `thumbnail' (which is destroyed below) would leave the preview
          tile unmodified.
        */
        (void) BilevelImage(preview_image,
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        (void) FormatMagickString(label,MaxTextExtent,"threshold %g",
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatMagickString(label,MaxTextExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,radius,exception);
        (void) FormatMagickString(label,MaxTextExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*
          percentage/100.0);
        (void) FormatMagickString(label,MaxTextExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatMagickString(label,MaxTextExtent,"shade %gx%g",
          degrees,degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise_info;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          Use a case-local rectangle: writing into `geometry' here would
          shrink the thumbnails generated on every subsequent iteration,
          since `geometry' also sizes each call to ThumbnailImage().
        */
        raise_info.width=(size_t) (2*i+2);
        raise_info.height=(size_t) (2*i+2);
        raise_info.x=i/2;
        raise_info.y=i/2;
        (void) RaiseImage(preview_image,&raise_info,MagickTrue);
        (void) FormatMagickString(label,MaxTextExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise_info.width,(double)
          raise_info.height,(double) raise_info.x,(double) raise_info.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,RGBColorspace,MagickFalse,threshold,
          threshold);
        (void) FormatMagickString(label,MaxTextExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,exception);
        (void) FormatMagickString(label,MaxTextExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,exception);
        (void) FormatMagickString(label,MaxTextExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception);
        (void) FormatMagickString(label,MaxTextExtent,"wave %gx%g",
          0.5*degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,exception);
        (void) FormatMagickString(label,MaxTextExtent,"paint %g",radius);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatMagickString(label,MaxTextExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MaxTextExtent];

        int
          file;

        MagickBooleanType
          status;

        /*
          Round-trip the tile through a temporary JPEG file to visualize
          compression quality, labeling the tile with the resulting size.
        */
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatMagickString(factor,MaxTextExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatMagickString(preview_image->filename,MaxTextExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MaxTextExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatMagickString(label,MaxTextExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatMagickString(label,MaxTextExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            /*
              Report the compressed tile size, consistent with the kb/mb
              branches above (not the size of the original thumbnail).
            */
            (void) FormatMagickString(label,MaxTextExtent,"quality %s\n%.20gb ",
              factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RadialBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RadialBlurImage method is:
%
% Image *RadialBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RadialBlurImageChannel(const Image *image,const ChannelType channel,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the radial blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RadialBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: radially blur all default channels.
  */
  return(RadialBlurImageChannel(image,DefaultChannels,angle,exception));
}
MagickExport Image *RadialBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;

  ssize_t
    y;

  /*
    Rotational blur about the image center: each destination pixel averages
    source samples taken along an arc of `angle' degrees.  Allocate the
    blur image and the sine/cosine lookup tables.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  blur_center.x=(double) image->columns/2.0;
  blur_center.y=(double) image->rows/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /*
    Number of arc samples grows with the angle and the image radius.
  */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(MagickRealType) (n-1);
  cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (MagickRealType *) NULL) ||
      (sin_theta == (MagickRealType *) NULL))
    {
      /*
        Release whichever table did allocate; the original code leaked one
        table when only the other allocation failed.
      */
      if (cos_theta != (MagickRealType *) NULL)
        cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (MagickRealType *) NULL)
        sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  offset=theta*(MagickRealType) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      MagickRealType
        normalize,
        radius;

      PixelPacket
        pixel;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Pixels nearer the center sample the arc more sparsely.
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      normalize=0.0;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: simple average of the arc samples.
          */
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            qixel.red+=pixel.red;
            qixel.green+=pixel.green;
            qixel.blue+=pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*indexes);
              }
            normalize+=1.0;
          }
          normalize=1.0/(fabs((double) normalize) <= MagickEpsilon ? 1.0 :
            normalize);
          if ((channel & RedChannel) != 0)
            q->red=ClampToQuantum(normalize*qixel.red);
          if ((channel & GreenChannel) != 0)
            q->green=ClampToQuantum(normalize*qixel.green);
          if ((channel & BlueChannel) != 0)
            q->blue=ClampToQuantum(normalize*qixel.blue);
          if ((channel & OpacityChannel) != 0)
            q->opacity=ClampToQuantum(normalize*qixel.opacity);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            blur_indexes[x]=(IndexPacket) ClampToQuantum(normalize*qixel.index);
        }
      else
        {
          MagickRealType
            alpha,
            gamma;

          /*
            Matte path: color channels are alpha-weighted, normalized by
            the accumulated alpha (gamma); opacity itself is averaged.
          */
          alpha=1.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*
              GetAlphaPixelComponent(&pixel));
            qixel.red+=alpha*pixel.red;
            qixel.green+=alpha*pixel.green;
            qixel.blue+=alpha*pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=alpha*(*indexes);
              }
            gamma+=alpha;
            normalize+=1.0;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          normalize=1.0/(fabs((double) normalize) <= MagickEpsilon ? 1.0 :
            normalize);
          if ((channel & RedChannel) != 0)
            q->red=ClampToQuantum(gamma*qixel.red);
          if ((channel & GreenChannel) != 0)
            q->green=ClampToQuantum(gamma*qixel.green);
          if ((channel & BlueChannel) != 0)
            q->blue=ClampToQuantum(gamma*qixel.blue);
          if ((channel & OpacityChannel) != 0)
            q->opacity=ClampToQuantum(normalize*qixel.opacity);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            blur_indexes[x]=(IndexPacket) ClampToQuantum(gamma*qixel.index);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RadialBlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
  sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
% Image *SelectiveBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType SelectiveContrast(const PixelPacket *p,
  const PixelPacket *q,const double threshold)
{
  /*
    Return MagickTrue when the intensity difference between the two pixels
    lies below the contrast threshold (i.e. the pixel should be blurred).
  */
  double
    contrast;

  contrast=fabs(PixelIntensity(p)-PixelIntensity(q));
  return(contrast < threshold ? MagickTrue : MagickFalse);
}
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: selectively blur all default channels.
  */
  return(SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    threshold,exception));
}
MagickExport Image *SelectiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view;

  double
    *kernel;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v,
    y;

  /*
    Blur each pixel with a Gaussian kernel, but include only neighbors whose
    intensity is within `threshold' of the center pixel.  Fixes relative to
    the previous revision: the kernel pointer now advances with the (u,v)
    position instead of only on contrast matches (which misaligned the
    weights), each secondary pass restarts at the head of the kernel
    (previously the opacity/index passes continued past the end of the
    width*width kernel array), and `kernel' is released on the clone/storage
    failure paths.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) width/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      /*
        Log the kernel, one row per line.
      */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatMagickString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatMagickString(format,MaxTextExtent,"%+f ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickRealType
      gamma;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),image->columns+width,width,exception);
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const double
        *restrict k;

      register ssize_t
        u;

      ssize_t
        j,
        v;

      pixel=bias;
      k=kernel;
      gamma=0.0;
      j=0;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path.  Accumulate only neighbors within the contrast
            threshold; `gamma' collects the weights actually used.
          */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                {
                  pixel.red+=(*k)*(p+u+j)->red;
                  pixel.green+=(*k)*(p+u+j)->green;
                  pixel.blue+=(*k)*(p+u+j)->blue;
                  gamma+=(*k);
                }
              k++;  /* track (u,v), not just matches */
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
              if ((channel & RedChannel) != 0)
                q->red=ClampToQuantum(gamma*GetRedPixelComponent(&pixel));
              if ((channel & GreenChannel) != 0)
                q->green=ClampToQuantum(gamma*GetGreenPixelComponent(&pixel));
              if ((channel & BlueChannel) != 0)
                q->blue=ClampToQuantum(gamma*GetBluePixelComponent(&pixel));
            }
          if ((channel & OpacityChannel) != 0)
            {
              gamma=0.0;
              k=kernel;  /* restart kernel for this pass */
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                    {
                      pixel.opacity+=(*k)*(p+u+j)->opacity;
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              if (gamma != 0.0)
                {
                  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 :
                    gamma);
                  SetOpacityPixelComponent(q,ClampToQuantum(gamma*
                    GetOpacityPixelComponent(&pixel)));
                }
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              k=kernel;  /* restart kernel for this pass */
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                    {
                      pixel.index+=(*k)*indexes[x+u+j];
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              if (gamma != 0.0)
                {
                  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 :
                    gamma);
                  blur_indexes[x]=ClampToQuantum(gamma*
                    GetIndexPixelComponent(&pixel));
                }
            }
        }
      else
        {
          MagickRealType
            alpha;

          /*
            Matte path: color contributions are alpha-weighted.
          */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                {
                  alpha=(MagickRealType) (QuantumScale*
                    GetAlphaPixelComponent(p+u+j));
                  pixel.red+=(*k)*alpha*(p+u+j)->red;
                  pixel.green+=(*k)*alpha*(p+u+j)->green;
                  pixel.blue+=(*k)*alpha*(p+u+j)->blue;
                  pixel.opacity+=(*k)*(p+u+j)->opacity;
                  gamma+=(*k)*alpha;
                }
              k++;  /* track (u,v), not just matches */
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
              if ((channel & RedChannel) != 0)
                q->red=ClampToQuantum(gamma*GetRedPixelComponent(&pixel));
              if ((channel & GreenChannel) != 0)
                q->green=ClampToQuantum(gamma*GetGreenPixelComponent(&pixel));
              if ((channel & BlueChannel) != 0)
                q->blue=ClampToQuantum(gamma*GetBluePixelComponent(&pixel));
            }
          if ((channel & OpacityChannel) != 0)
            {
              gamma=0.0;
              k=kernel;  /* restart kernel for this pass */
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                    {
                      pixel.opacity+=(*k)*(p+u+j)->opacity;
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              if (gamma != 0.0)
                {
                  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 :
                    gamma);
                  SetOpacityPixelComponent(q,
                    ClampOpacityPixelComponent(&pixel));
                }
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              k=kernel;  /* restart kernel for this pass */
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse)
                    {
                      alpha=(MagickRealType) (QuantumScale*
                        GetAlphaPixelComponent(p+u+j));
                      pixel.index+=(*k)*alpha*indexes[x+u+j];
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              if (gamma != 0.0)
                {
                  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 :
                    gamma);
                  blur_indexes[x]=ClampToQuantum(gamma*
                    GetIndexPixelComponent(&pixel));
                }
            }
        }
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SelectiveBlurImageChannel)
#endif
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishMagickMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;  /* light direction vector, scaled by QuantumRange */

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (shade_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shade_image->exception);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector from the spherical angles.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image: one parallel task per destination row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  shade_view=AcquireCacheView(shade_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickRealType
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const PixelPacket
      *restrict p,
      *restrict s0,
      *restrict s1,
      *restrict s2;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch rows y-1..y+1, each padded by one virtual pixel on either side,
      so the 3x3 window is defined at the image borders.  The row stride of
      this buffer is image->columns+2.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,image->columns+2,3,exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.  s0/s1/s2 track the pixel directly above,
      at, and below the current column (offset +1 skips the left pad pixel).
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    s0=p+1;
    s1=s0+image->columns+2;
    s2=s1+image->columns+2;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine the surface normal from intensity differences over the 3x3
        window (left-minus-right for X, bottom-minus-top for Y) and compute
        shading as the normalized dot product with the light vector.
      */
      normal.x=(double) (PixelIntensity(s0-1)+PixelIntensity(s1-1)+
        PixelIntensity(s2-1)-PixelIntensity(s0+1)-PixelIntensity(s1+1)-
        PixelIntensity(s2+1));
      normal.y=(double) (PixelIntensity(s2-1)+PixelIntensity(s2)+
        PixelIntensity(s2+1)-PixelIntensity(s0-1)-PixelIntensity(s0)-
        PixelIntensity(s0+1));
      if ((normal.x == 0.0) && (normal.y == 0.0))
        shade=light.z;  /* flat surface: lit by the Z component only */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=
                normal.x*normal.x+normal.y*normal.y+normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      if (gray != MagickFalse)
        {
          /* Replace the pixel with the raw shade intensity. */
          q->red=(Quantum) shade;
          q->green=(Quantum) shade;
          q->blue=(Quantum) shade;
        }
      else
        {
          /* Modulate the center pixel's color by the shade factor. */
          q->red=ClampToQuantum(QuantumScale*shade*s1->red);
          q->green=ClampToQuantum(QuantumScale*shade*s1->green);
          q->blue=ClampToQuantum(QuantumScale*shade*s1->blue);
        }
      q->opacity=s1->opacity;
      s0++;
      s1++;
      s2++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *SharpenImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Sharpen all default channels; delegate to the channel-aware variant.
  */
  return(SharpenImageChannel(image,DefaultChannels,radius,sigma,exception));
}
MagickExport Image *SharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    *kernel,
    total;

  Image
    *sharp_image;

  register ssize_t
    k;

  size_t
    width;

  ssize_t
    half,
    x,
    y;

  /*
    Build a fully negated Gaussian kernel, then patch the center tap so the
    kernel has a net positive weight (Laplacian-of-Gaussian style sharpen),
    and convolve the requested channels with it.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width*width,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  half=(ssize_t) width/2;
  total=0.0;
  k=0;
  for (y=(-half); y <= half; y++)
    for (x=(-half); x <= half; x++)
    {
      kernel[k]=(double) (-exp(-((double) x*x+y*y)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      total+=kernel[k];
      k++;
    }
  kernel[k/2]=(double) ((-2.0)*total);  /* center tap overpowers the sum */
  sharp_image=ConvolveImageChannel(image,channel,width,kernel,exception);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a block defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: Choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  RandomInfo
    **restrict random_info;  /* one generator per OpenMP thread */

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&spread_image->exception);
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image: each destination pixel is interpolated from a randomly
    displaced source location within a width-sized neighborhood.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(spread_image,&bias);
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireCacheView(image);
  spread_view=AcquireCacheView(spread_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) spread_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(spread_view);
    pixel=bias;
    for (x=0; x < (ssize_t) spread_image->columns; x++)
    {
      /*
        Sample at (x,y) jittered by up to +/- width/2 in each axis.
      */
      (void) InterpolateMagickPixelPacket(image,image_view,
        UndefinedInterpolatePixel,(double) x+width*(GetPseudoRandomValue(
        random_info[id])-0.5),(double) y+width*(GetPseudoRandomValue(
        random_info[id])-0.5),&pixel,exception);
      SetPixelPacket(spread_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SpreadImage)
#endif
        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  /*
    Fix: previously a row failure (status == MagickFalse) still returned the
    partially-written image; destroy it and return NULL, consistent with
    ShadeImage and the other effect methods in this file.
  */
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
#define ListChannels 5

/*
  One skip-list node per possible 16-bit sample value.  A node's index in the
  SkipList nodes array *is* its sample value; index 65536 is the list-head
  sentinel.
*/
typedef struct _ListNode
{
  size_t
    next[9],     /* forward links, one per skip-list level (up to 9 levels) */
    count,       /* occurrences of this sample value in the current window */
    signature;   /* node is live only when equal to PixelList.signature */
} ListNode;

/*
  A sorted multiset of 16-bit sample values, implemented as a skip-list.
*/
typedef struct _SkipList
{
  ssize_t
    level;       /* highest level currently in use (0..8) */

  ListNode
    *nodes;      /* 65537 nodes: one per value plus the 65536 sentinel */
} SkipList;

/*
  Per-thread neighborhood statistics state: one skip-list per channel
  (red, green, blue, opacity, index).
*/
typedef struct _PixelList
{
  size_t
    length,      /* number of samples in the neighborhood (width*height) */
    seed,        /* LCG state used to pick pseudo-random node levels */
    signature;   /* bumped on reset to lazily invalidate stale nodes */

  SkipList
    lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  register ssize_t
    channel;

  /*
    Release every per-channel node table, then the list itself.  NULL-safe;
    always returns NULL so callers can reassign in one step.
  */
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (channel=0; channel < ListChannels; channel++)
  {
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      continue;
    pixel_list->lists[channel].nodes=(ListNode *) RelinquishMagickMemory(
      pixel_list->lists[channel].nodes);
  }
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    n;

  /*
    Destroy one pixel list per potential OpenMP thread, then the array.
  */
  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetOpenMPMaximumThreads(); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  register ssize_t
    channel;

  /*
    Allocate a zeroed PixelList whose window covers width*height samples.
    Each channel gets 65537 zeroed nodes: one per 16-bit sample value plus
    the 65536 sentinel.  Returns NULL on any allocation failure.
  */
  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return(pixel_list);
  (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  for (channel=0; channel < ListChannels; channel++)
  {
    pixel_list->lists[channel].nodes=(ListNode *) AcquireQuantumMemory(65537UL,
      sizeof(*pixel_list->lists[channel].nodes));
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      return(DestroyPixelList(pixel_list));
    (void) ResetMagickMemory(pixel_list->lists[channel].nodes,0,65537UL*
      sizeof(*pixel_list->lists[channel].nodes));
  }
  pixel_list->signature=MagickSignature;
  return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=GetOpenMPMaximumThreads();
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
  const size_t color)
{
  register SkipList
    *list;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Insert sample value `color` (count 1) into the channel's skip-list.  The
    node's array index is its value, so the list stays sorted by value.
  */
  /*
    Initialize the node.
  */
  list=pixel_list->lists+channel;
  list->nodes[color].signature=pixel_list->signature;
  list->nodes[color].count=1;
  /*
    Determine where it belongs in the list: descend from the top level,
    recording at each level the last node whose value precedes `color`
    (65536 is the sentinel/head).
  */
  search=65536UL;
  for (level=list->level; level >= 0; level--)
  {
    while (list->nodes[search].next[level] < color)
      search=list->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node: an LCG is stepped until
    two chosen bits are not both set, giving a geometric level distribution.
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;  /* cap at the next[] array bound */
  if (level > (list->level+2))
    level=list->level+2;  /* grow the list height by at most 2 at a time */
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > list->level)
  {
    list->level++;
    update[list->level]=65536UL;
  }
  /*
    Link the node into the skip-list at every level from `level` down to 0.
  */
  do
  {
    list->nodes[color].next[level]=list->nodes[update[level]].next[level];
    list->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
static MagickPixelPacket GetMaximumPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    maximum,
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    For each channel, walk the sorted skip-list at level 0 (starting from
    the 65536 sentinel) until the whole window population is accounted for;
    the largest value visited is the channel maximum.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    node=65536UL;
    tally=0;
    maximum=list->nodes[node].next[0];
    do
    {
      node=list->nodes[node].next[0];
      if (node > maximum)
        maximum=node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) maximum;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetMeanPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  MagickRealType
    total;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    For each channel, walk the sorted skip-list at level 0 and accumulate
    value*count until the whole window population has been seen; the mean is
    that sum divided by the window size.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    node=65536UL;
    tally=0;
    total=0.0;
    do
    {
      node=list->nodes[node].next[0];
      total+=(MagickRealType) list->nodes[node].count*node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    total/=pixel_list->length;
    channels[channel]=(unsigned short) total;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetMedianPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    Walk each channel's sorted skip-list, summing occurrence counts, and
    stop at the first value whose cumulative count exceeds half the window
    population; that value is the channel median.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    node=65536UL;
    tally=0;
    do
    {
      node=list->nodes[node].next[0];
      tally+=list->nodes[node].count;
    } while (tally <= (ssize_t) (pixel_list->length >> 1));
    channels[channel]=(unsigned short) node;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetMinimumPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    minimum,
    node;

  ssize_t
    tally;

  unsigned short
    channels[ListChannels];

  /*
    The skip-list is sorted, so the first element after the 65536 sentinel
    is already the smallest; the walk continues only to account for the
    whole window population.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    tally=0;
    node=65536UL;
    minimum=list->nodes[node].next[0];
    do
    {
      node=list->nodes[node].next[0];
      if (node < minimum)
        minimum=node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) minimum;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetModePixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Make each pixel the 'predominate color' of the specified neighborhood:
    walk each channel's sorted skip-list and keep the value with the largest
    occurrence count.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    mode=color;
    /* NOTE(review): max_count seeds from the sentinel node's count field,
       which is never explicitly reset -- confirm it cannot exceed a real
       node's count after many reuses of this PixelList. */
    max_count=list->nodes[mode].count;
    count=0;
    do
    {
      color=list->nodes[color].next[0];
      if (list->nodes[color].count > max_count)
        {
          mode=color;
          max_count=list->nodes[mode].count;
        }
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) mode;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetNonpeakPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Finds the non peak value for each of the colors: locate the median, then
    prefer one of its neighbors when the median sits at an end of the list
    (65536UL is the sentinel, so a sentinel neighbor means "no neighbor").
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    next=list->nodes[color].next[0];
    count=0;
    do
    {
      /* previous is assigned on the first pass, so it is never read
         uninitialized below. */
      previous=color;
      color=next;
      next=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    if ((previous == 65536UL) && (next != 65536UL))
      color=next;       /* median is the smallest value: step up */
    else
      if ((previous != 65536UL) && (next == 65536UL))
        color=previous; /* median is the largest value: step down */
    channels[channel]=(unsigned short) color;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static MagickPixelPacket GetStandardDeviationPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  MagickRealType
    sum,
    sum_squared;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard-deviation value for each of the color channels using
    the identity stddev = sqrt(E[x^2] - E[x]^2) over the window population.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      register ssize_t
        i;

      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      /* accumulate color^2 once per occurrence of the value */
      for (i=0; i < (ssize_t) list->nodes[color].count; i++)
        sum_squared+=((MagickRealType) color)*((MagickRealType) color);
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
  const IndexPacket *indexes,PixelList *pixel_list)
{
  size_t
    signature;

  unsigned short
    index;

  /*
    Add one sample per channel to the skip-lists.  A matching signature
    means the value's node is already live in this window, so only its
    count is bumped; otherwise the node is (re)inserted.
  */
  index=ScaleQuantumToShort(pixel->red);
  signature=pixel_list->lists[0].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[0].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,0,index);
  index=ScaleQuantumToShort(pixel->green);
  signature=pixel_list->lists[1].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[1].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,1,index);
  index=ScaleQuantumToShort(pixel->blue);
  signature=pixel_list->lists[2].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[2].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,2,index);
  index=ScaleQuantumToShort(pixel->opacity);
  signature=pixel_list->lists[3].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[3].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,3,index);
  /* NOTE(review): for non-CMYK images this leaves `index` holding the
     opacity sample, so list 4 is fed opacity values -- presumably harmless
     because the index channel is only consumed for CMYK; confirm. */
  if (image->colorspace == CMYKColorspace)
    index=ScaleQuantumToShort(*indexes);
  signature=pixel_list->lists[4].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[4].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,4,index);
}
static inline MagickRealType MagickAbsoluteValue(const MagickRealType x)
{
  /* Absolute value; spelled as a conditional expression. */
  return(x < 0 ? -x : x);
}
static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  register ListNode
    *sentinel;

  register SkipList
    *list;

  register ssize_t
    channel;

  /*
    Empty every channel list by pointing all sentinel links back at the
    sentinel itself; bumping the signature lazily invalidates the old
    nodes without touching them.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    sentinel=list->nodes+65536UL;
    list->level=0;
    for (level=0; level < 9; level++)
      sentinel->next[level]=65536UL;
  }
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  /*
    Apply the neighborhood statistic to the default channel set.
  */
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
MagickExport Image *StatisticImageChannel(const Image *image,
  const ChannelType channel,const StatisticType type,const size_t width,
  const size_t height,ExceptionInfo *exception)
{
#define StatisticWidth \
  (width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) : width)
#define StatisticHeight \
  (height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) : height)
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **restrict pixel_list;  /* one skip-list set per OpenMP thread */

  ssize_t
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&statistic_image->exception);
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  pixel_list=AcquirePixelListThreadSet(StatisticWidth,StatisticHeight);
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  statistic_view=AcquireCacheView(statistic_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict statistic_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch enough virtual rows to cover the neighborhood centered on row y;
      each fetched row is StatisticWidth pixels wider than the image, so the
      buffer's row stride is image->columns+StatisticWidth.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) StatisticWidth/2L),y-
      (ssize_t) (StatisticHeight/2L),image->columns+StatisticWidth,
      StatisticHeight,exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const IndexPacket
        *restrict s;

      register const PixelPacket
        *restrict r;

      register ssize_t
        u,
        v;

      /*
        Load the whole neighborhood into this thread's skip-lists.
      */
      r=p;
      s=indexes+x;
      ResetPixelList(pixel_list[id]);
      for (v=0; v < (ssize_t) StatisticHeight; v++)
      {
        for (u=0; u < (ssize_t) StatisticWidth; u++)
          InsertPixelList(image,r+u,s+u,pixel_list[id]);
        r+=image->columns+StatisticWidth;  /* advance one virtual row */
        s+=image->columns+StatisticWidth;
      }
      GetMagickPixelPacket(image,&pixel);
      /* NOTE(review): the center-pixel offset below mixes p (already
         advanced once per column) with the constant term
         StatisticWidth*StatisticHeight/2 -- confirm this addresses the
         intended neighborhood center. */
      SetMagickPixelPacket(image,p+StatisticWidth*StatisticHeight/2,indexes+
        StatisticWidth*StatisticHeight/2+x,&pixel);
      switch (type)
      {
        case GradientStatistic:
        {
          /* Gradient: per-channel max minus min of the neighborhood. */
          MagickPixelPacket
            maximum,
            minimum;

          minimum=GetMinimumPixelList(pixel_list[id]);
          maximum=GetMaximumPixelList(pixel_list[id]);
          pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
          pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
          pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
          pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
          if (image->colorspace == CMYKColorspace)
            pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
          break;
        }
        case MaximumStatistic:
        {
          pixel=GetMaximumPixelList(pixel_list[id]);
          break;
        }
        case MeanStatistic:
        {
          pixel=GetMeanPixelList(pixel_list[id]);
          break;
        }
        case MedianStatistic:
        default:
        {
          pixel=GetMedianPixelList(pixel_list[id]);
          break;
        }
        case MinimumStatistic:
        {
          pixel=GetMinimumPixelList(pixel_list[id]);
          break;
        }
        case ModeStatistic:
        {
          pixel=GetModePixelList(pixel_list[id]);
          break;
        }
        case NonpeakStatistic:
        {
          pixel=GetNonpeakPixelList(pixel_list[id]);
          break;
        }
        case StandardDeviationStatistic:
        {
          pixel=GetStandardDeviationPixelList(pixel_list[id]);
          break;
        }
      }
      /*
        Store only the requested channels; others keep the cloned values.
      */
      if ((channel & RedChannel) != 0)
        q->red=ClampToQuantum(pixel.red);
      if ((channel & GreenChannel) != 0)
        q->green=ClampToQuantum(pixel.green);
      if ((channel & BlueChannel) != 0)
        q->blue=ClampToQuantum(pixel.blue);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        q->opacity=ClampToQuantum(pixel.opacity);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        statistic_indexes[x]=(IndexPacket) ClampToQuantum(pixel.index);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_StatisticImage)
#endif
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  return(statistic_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double amount,const double threshold,
% ExceptionInfo *exception)
% Image *UnsharpMaskImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double amount,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o amount: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double amount,const double threshold,
  ExceptionInfo *exception)
{
  /*
    Unsharp-mask across the default channel set.
  */
  return(UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,amount,
    threshold,exception));
}
/*
  UnsharpMaskImageChannel() sharpens the selected channels of an image with
  an unsharp mask: the image is first blurred with a Gaussian (radius,
  sigma); then, wherever twice the absolute original-vs-blur difference
  exceeds the threshold (scaled to the quantum range), 'amount' times that
  difference is added back to the original pixel.

    o channel: the channel type(s) to sharpen.
    o radius, sigma: geometry of the Gaussian used for the blur pass.
    o amount: fraction of the original/blur difference added back.
    o threshold: minimum difference, as a fraction of QuantumRange, needed
      before sharpening is applied to a pixel.
    o exception: return any errors or warnings in this structure.

  Returns the sharpened image (caller owns it), or NULL on failure.

  Fix: the opacity branch now casts p->opacity to MagickRealType like the
  red/green/blue/index branches do; the implicit conversion already
  produced the same value, so behavior is unchanged.
*/
MagickExport Image *UnsharpMaskImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double amount,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  unsharp_image=BlurImageChannel(image,channel,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    The per-pixel test below compares 2*|difference| against this scaled
    threshold.
  */
  quantum_threshold=(MagickRealType) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireCacheView(image);
  unsharp_view=AcquireCacheView(unsharp_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict unsharp_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip the remaining work on this row. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
    pixel=bias;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          pixel.red=p->red-(MagickRealType) q->red;
          if (fabs(2.0*pixel.red) < quantum_threshold)
            pixel.red=(MagickRealType) GetRedPixelComponent(p);
          else
            pixel.red=(MagickRealType) p->red+(pixel.red*amount);
          SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel.green=p->green-(MagickRealType) q->green;
          if (fabs(2.0*pixel.green) < quantum_threshold)
            pixel.green=(MagickRealType) GetGreenPixelComponent(p);
          else
            pixel.green=(MagickRealType) p->green+(pixel.green*amount);
          SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel.blue=p->blue-(MagickRealType) q->blue;
          if (fabs(2.0*pixel.blue) < quantum_threshold)
            pixel.blue=(MagickRealType) GetBluePixelComponent(p);
          else
            pixel.blue=(MagickRealType) p->blue+(pixel.blue*amount);
          SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
        }
      if ((channel & OpacityChannel) != 0)
        {
          pixel.opacity=p->opacity-(MagickRealType) q->opacity;
          if (fabs(2.0*pixel.opacity) < quantum_threshold)
            pixel.opacity=(MagickRealType) GetOpacityPixelComponent(p);
          else
            pixel.opacity=(MagickRealType) p->opacity+(pixel.opacity*amount);
          SetOpacityPixelComponent(q,ClampOpacityPixelComponent(&pixel));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel.index=indexes[x]-(MagickRealType) unsharp_indexes[x];
          if (fabs(2.0*pixel.index) < quantum_threshold)
            pixel.index=(MagickRealType) indexes[x];
          else
            pixel.index=(MagickRealType) indexes[x]+(pixel.index*amount);
          unsharp_indexes[x]=ClampToQuantum(pixel.index);
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_UnsharpMaskImageChannel)
#endif
        proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
|
local_response_norm.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_LOCAL_RESPONSE_NORM_H_
#define MACE_KERNELS_LOCAL_RESPONSE_NORM_H_
#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/kernels/kernel.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
template<DeviceType D, typename T>
struct LocalResponseNormFunctor;
template<>
struct LocalResponseNormFunctor<DeviceType::CPU, float> : OpKernel {
  explicit LocalResponseNormFunctor(OpKernelContext *context)
      : OpKernel(context) {}

  // Local Response Normalization across channels (input is NCHW):
  //   output[b,c,h,w] = input[b,c,h,w] *
  //       (bias + alpha * sum_{c' in [c-r, c+r]} input[b,c',h,w]^2)^(-beta)
  // where r = depth_radius and the window is clipped to [0, channels).
  //
  // Fix: the channel-window bounds were declared 'int' while being computed
  // from index_t expressions (narrowing); they are now index_t.  The inner
  // loop bound also reuses image_size instead of recomputing height*width.
  MaceStatus operator()(const Tensor *input,
                        int depth_radius,
                        float bias,
                        float alpha,
                        float beta,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    const index_t batch = input->dim(0);
    const index_t channels = input->dim(1);
    const index_t height = input->dim(2);
    const index_t width = input->dim(3);

    const float *input_ptr = input->data<float>();
    float *output_ptr = output->mutable_data<float>();

    index_t image_size = height * width;
    index_t batch_size = channels * image_size;

#pragma omp parallel for collapse(2)
    for (index_t b = 0; b < batch; ++b) {
      for (index_t c = 0; c < channels; ++c) {
        // Channel window contributing to output channel c, clipped to
        // the valid range.
        const index_t begin_input_c = std::max(static_cast<index_t>(0),
                                               c - depth_radius);
        const index_t end_input_c = std::min(channels, c + depth_radius + 1);

        index_t pos = b * batch_size;
        for (index_t hw = 0; hw < image_size; ++hw, ++pos) {
          float accum = 0.f;
          for (index_t input_c = begin_input_c; input_c < end_input_c;
               ++input_c) {
            const float input_val = input_ptr[pos + input_c * image_size];
            accum += input_val * input_val;
          }
          const float multiplier = std::pow(bias + alpha * accum, -beta);
          output_ptr[pos + c * image_size] =
              input_ptr[pos + c * image_size] * multiplier;
        }
      }
    }

    return MACE_SUCCESS;
  }
};
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_LOCAL_RESPONSE_NORM_H_
|
ep.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - EP
This benchmark is an OpenMP C version of the NPB EP code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: P. O. Frederickson
D. H. Bailey
A. C. Woo
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
#include "npbparams.h"
/* parameters */
#define MK 16
#define MM (M - MK)
#define NN (1 << MM)
#define NK (1 << MK)
#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#define TIMERS_ENABLED FALSE
/* global variables */
/* common /storage/ */
#include <omp.h>
static double x[131072];
static double q[10];
/*--------------------------------------------------------------------
program EMBAR
c-------------------------------------------------------------------*/
/*
c This is the serial version of the APP Benchmark 1,
c the "embarassingly parallel" benchmark.
c
c M is the Log_2 of the number of complex pairs of uniform (0, 1) random
c numbers. MK is the Log_2 of the size of each batch of uniform random
c numbers. MK can be set for convenience on a given system, since it does
c not affect the results.
*/
/*
  NOTE(review): this is mechanically translated code (serial Fortran ->
  OpenMP C -> source-to-source flattening).  Statement order and the odd
  constructs below are preserved deliberately; only comments are added.
*/
int main(int argc,char **argv)
{
  double Mops;
  double t1;
  double t2;
  double t3;
  double t4;
  double x1;
  double x2;
  double sx;
  double sy;
  double tm;
  double an;
  double tt;
  double gc;
  double dum[3] = {(1.0), (1.0), (1.0)};
  int np;
  /* NOTE(review): ierr, node, no_nodes, ierrcode, no_large_nodes and
     np_add appear to be unused leftovers from the MPI variant — confirm
     before removing. */
  int ierr;
  int node;
  int no_nodes;
  int i;
  int ik;
  int kk;
  int l;
  int k;
  int nit;
  int ierrcode;
  int no_large_nodes;
  int np_add;
  int k_offset;
  int j;
  int nthreads = 1;
  boolean verified;
  /* character*13 */
  char size[14];
  /*
  c Because the size of the problem is too large to store in a 32-bit
  c integer for some classes, we put it into a string (for printing).
  c Have to strip off the decimal point put in there by the floating
  c point print statement (internal file)
  */
  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version - EP Benchmark\n");
  sprintf(size,"%12.0f",(pow(2.0,(28 + 1))));
  /* Blank out the decimal point left by the %f conversion.  Parallelizing
     a 13-character scan is pointless, but harmless. */
#pragma omp parallel for private (j)
  for (j = 13; j >= 1; j += -1) {
    if (size[j] == '.')
      size[j] = ' ';
  }
  printf(" Number of random numbers generated: %13s\n",size);
  verified = 0;
  /*
  c Compute the number of "batches" of random number pairs generated
  c per processor. Adjust if the number of processors does not evenly
  c divide the total number
  */
  /* '<<' binds looser than '-', so this is 1 << (28 - 16), i.e. NN =
     2^(M-MK) — correct but fragile; parenthesize if ever touched. */
  np = 1 << 28 - 16;
  /*
  c Call the random number generator functions and initialize
  c the x-array to reduce the effects of paging on the timings.
  c Also, call all mathematical functions that are used. Make
  c sure these initializations cannot be eliminated as dead code.
  */
  vranlc(0,&dum[0],dum[1],&dum[2]);
  dum[0] = randlc(&dum[1],dum[2]);
#pragma omp parallel for private (i)
  for (i = 0; i <= 131071; i += 1) {
    x[i] = - 1.0e99;
  }
  Mops = log((sqrt((fabs((1.0 > 1.0?1.0 : 1.0))))));
  timer_clear(1);
  timer_clear(2);
  timer_clear(3);
  timer_start(1);
  vranlc(0,&t1,1220703125.0,x);
  /* Compute AN = A ^ (2 * NK) (mod 2^46). */
  t1 = 1220703125.0;
  for (i = 1; i <= 17; i += 1) {
    t2 = randlc(&t1,t1);
  }
  an = t1;
  tt = 271828183.0;
  gc = 0.0;
  sx = 0.0;
  sy = 0.0;
#pragma omp parallel for private (i)
  for (i = 0; i <= 9; i += 1) {
    q[i] = 0.0;
  }
  /*
  c Each instance of this loop may be performed independently. We compute
  c the k offsets separately to take into account the fact that some nodes
  c have more numbers to generate than others
  */
  k_offset = - 1;
  /* NOTE(review): judging by the trailing "end of parallel region"
     comment, this block was once an OpenMP parallel region with qq as the
     per-thread copy of q; as written it runs serially on one thread, so
     the main k-loop is NOT parallel — confirm against the original EP. */
  {
    double t1;
    double t2;
    double t3;
    double t4;
    double x1;
    double x2;
    int kk;
    int i;
    int ik;
    int l;
    /* private copy of q[0:NQ-1] */
    double qq[10];
#pragma omp parallel for private (i)
    for (i = 0; i <= 9; i += 1) {
      qq[i] = 0.0;
    }
    for (k = 1; k <= np; k += 1) {
      kk = k_offset + k;
      t1 = 271828183.0;
      t2 = an;
      /* Find starting seed t1 for this kk. */
      for (i = 1; i <= 100; i += 1) {
        ik = kk / 2;
        if (2 * ik != kk)
          t3 = randlc(&t1,t2);
        if (ik == 0)
          break;
        t3 = randlc(&t2,t2);
        kk = ik;
      }
      /* Compute uniform pseudorandom numbers. */
      if (0 == 1)
        timer_start(3);
      /* NOTE(review): 'x - 1' compensates for vranlc's Fortran-style
         1-based indexing; forming a pointer before the start of x is
         formally undefined behavior in C, although it works on common
         platforms. */
      vranlc(2 * (1 << 16),&t1,1220703125.0,x - 1);
      if (0 == 1)
        timer_stop(3);
      /*
      c Compute Gaussian deviates by acceptance-rejection method and
      c tally counts in concentric square annuli. This loop is not
      c vectorizable.
      */
      if (0 == 1)
        timer_start(2);
      for (i = 0; i <= 65535; i += 1) {
        x1 = 2.0 * x[2 * i] - 1.0;
        x2 = 2.0 * x[2 * i + 1] - 1.0;
        t1 = x1 * x1 + x2 * x2;
        if (t1 <= 1.0) {
          /* Box-Muller style transform of an accepted point. */
          t2 = sqrt(- 2.0 * log(t1) / t1);
          /* Xi */
          t3 = x1 * t2;
          /* Yi */
          t4 = x2 * t2;
          /* Annulus index: truncation of max(|Xi|,|Yi|) to int. */
          l = ((fabs(t3) > fabs(t4)?fabs(t3) : fabs(t4)));
          /* counts */
          qq[l] += 1.0;
          /* sum of Xi */
          sx = sx + t3;
          /* sum of Yi */
          sy = sy + t4;
        }
      }
      if (0 == 1)
        timer_stop(2);
    }
    {
#pragma omp parallel for private (i)
      for (i = 0; i <= 9; i += 1) {
        q[i] += qq[i];
      }
    }
#if defined(_OPENMP)
#endif /* _OPENMP */
    /* end of parallel region */
  }
#pragma omp parallel for private (i) reduction (+:gc)
  for (i = 0; i <= 9; i += 1) {
    gc = gc + q[i];
  }
  timer_stop(1);
  tm = timer_read(1);
  nit = 0;
  /* Verification: the translator constant-folded the class selector, so
     only the (28 == 28) branch is live; the others are dead code kept
     from the original class table. */
  if (28 == 24) {
    if (fabs((sx - - 3.247834652034740e3) / sx) <= 1.0e-8 && fabs((sy - - 6.958407078382297e3) / sy) <= 1.0e-8) {
      verified = 1;
    }
  }
  else if (28 == 25) {
    if (fabs((sx - - 2.863319731645753e3) / sx) <= 1.0e-8 && fabs((sy - - 6.320053679109499e3) / sy) <= 1.0e-8) {
      verified = 1;
    }
  }
  else if (28 == 28) {
    if (fabs((sx - - 4.295875165629892e3) / sx) <= 1.0e-8 && fabs((sy - - 1.580732573678431e4) / sy) <= 1.0e-8) {
      verified = 1;
    }
  }
  else if (28 == 30) {
    if (fabs((sx - 4.033815542441498e4) / sx) <= 1.0e-8 && fabs((sy - - 2.660669192809235e4) / sy) <= 1.0e-8) {
      verified = 1;
    }
  }
  else if (28 == 32) {
    if (fabs((sx - 4.764367927995374e4) / sx) <= 1.0e-8 && fabs((sy - - 8.084072988043731e4) / sy) <= 1.0e-8) {
      verified = 1;
    }
  }
  Mops = pow(2.0,(28 + 1)) / tm / 1000000.0;
  printf("EP Benchmark Results: \nCPU Time = %10.4f\nN = 2^%5d\nNo. Gaussian Pairs = %15.0f\nSums = %25.15e %25.15e\nCounts:\n",tm,28,gc,sx,sy);
  for (i = 0; i <= 9; i += 1) {
    printf("%3d %15.0f\n",i,q[i]);
  }
  c_print_results("EP",'A',28 + 1,0,0,nit,nthreads,tm,Mops,"Random numbers generated",verified,"3.0 structured","01 Dec 2019","(none)","(none)","-lm","(none)","(none)","(none)","randdp");
  if (0 == 1) {
    printf("Total time: %f",(timer_read(1)));
    printf("Gaussian pairs: %f",(timer_read(2)));
    printf("Random numbers: %f",(timer_read(3)));
  }
}
|
target_data_messages.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify=expected,omp45 -fopenmp -fopenmp-version=45 -ferror-limit 100 -o - %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify=expected,omp50 -fopenmp -fopenmp-version=50 -ferror-limit 100 -o - %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify=expected,omp45 -fopenmp-simd -fopenmp-version=45 -ferror-limit 100 -o - %s -Wuninitialized
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify=expected,omp50 -fopenmp-simd -fopenmp-version=50 -ferror-limit 100 -o - %s -Wuninitialized
/* Empty helper: used as the body of the OpenMP regions exercised below. */
void foo() { }
/* Checks that -Wuninitialized fires for an uninitialized variable used in a
   map clause; the trailing comments are clang -verify directives. */
void xxx(int argc) {
  int map; // expected-note {{initialize the variable 'map' to silence this warning}}
#pragma omp target data map(map) // expected-warning {{variable 'map' is uninitialized when used here}}
  for (int i = 0; i < 10; ++i)
    ;
}
/* Exercises clause validation and branch-in/out rules for
   '#pragma omp target data'; each directive carries its clang -verify
   check in a trailing comment — do not reflow those comments. */
int main(int argc, char **argv) {
  int a;
#pragma omp target data // omp45-error {{expected at least one 'map' or 'use_device_ptr' clause for '#pragma omp target data'}} omp50-error {{expected at least one 'map', 'use_device_ptr', or 'use_device_addr' clause for '#pragma omp target data'}}
  {}
L1:
  foo();
#pragma omp target data map(a) allocate(a) // expected-error {{unexpected OpenMP clause 'allocate' in directive '#pragma omp target data'}}
  {
    foo();
    goto L1; // expected-error {{use of undeclared label 'L1'}}
  }
  goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp target data map(a)
L2:
  foo();
#pragma omp target data map(a)(i) // expected-warning {{extra tokens at the end of '#pragma omp target data' are ignored}}
  {
    foo();
  }
#pragma omp target unknown // expected-warning {{extra tokens at the end of '#pragma omp target' are ignored}}
  {
    foo();
  }
#pragma omp target data map(delete: a) // expected-error {{map type 'delete' is not allowed for '#pragma omp target data'}}
  {
    foo();
  }
#pragma omp target data map(release: a) // expected-error {{map type 'release' is not allowed for '#pragma omp target data'}}
  {
    foo();
  }
  return 0;
}
|
DRB057-jacobiinitialize-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Use of private() clause
*/
#include <stdio.h>
#include <math.h>
#define MSIZE 200
int n=MSIZE, m=MSIZE;
double alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;
/*
  Initialize the initial condition u[][] and the RHS f[][] on the
  (n x m) grid over [-1,1] x [-1,1].

  Only the outer loop is parallelized; i, j, xx and yy are private per
  thread, so iterations are independent (no race).  The former inner
  '#pragma omp parallel for' spawned a nested parallel region inside
  every outer iteration — pure team-creation overhead with an identical
  result — so it is removed.
*/
void
initialize ()
{
  int i, j, xx, yy;
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);

  /* Initialize initial condition and RHS */
#pragma omp parallel for private(i,j,xx,yy)
  for (i = 0; i < n; i++)
    for (j = 0; j < m; j++)
      {
        xx = (int) (-1.0 + dx * (i - 1));       /* -1 < x < 1 */
        yy = (int) (-1.0 + dy * (j - 1));       /* -1 < y < 1 */
        u[i][j] = 0.0;
        f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
          - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
      }
}
/* Driver: fill the grids, then dump u and f so the initialization work
   cannot be optimized away. */
int main()
{
  int row, col;

  initialize();

  for (row = 0; row < n; row++)
    {
      for (col = 0; col < m; col++)
        printf("%lf %lf\n", u[row][col], f[row][col]);
    }

  return 0;
}
|
test_omp.c | #include <nautilus/nautilus.h>
#include <rt/omp/omp.h>
#define N 4
volatile float a[N];
volatile float b[N];
volatile float c[N];
// Smoke test: fill a[] and b[], have every thread identify itself from a
// parallel region, compute c = a * b elementwise with a parallel for,
// then print all three arrays.
static int omp_simple()
{
    int idx;

    for (idx = 0; idx < N; idx++) {
        a[idx] = idx;
        b[idx] = idx;
    }

#pragma omp parallel
    nk_vc_printf("I am thread %d (%d total)\n",omp_get_thread_num(),omp_get_num_threads());

#pragma omp parallel for
    for (idx = 0; idx < N; idx++) {
        c[idx] = a[idx] * b[idx];
    }

    for (idx = 0; idx < N; idx++) {
        nk_vc_printf("a[%d]=%d b[%d]=%d c[%d]=%d\n",idx,(int)a[idx],idx,(int)b[idx],idx,(int)c[idx]);
    }

    return 0;
}
// Report the current team size at the given nesting level; 'single'
// ensures exactly one thread of the team prints.
static void report_num_threads(int level)
{
#pragma omp single
    nk_vc_printf("Level %d: number of threads in the team - %d\n",
                 level, omp_get_num_threads());
}
// Exercise three levels of nested parallel regions, two threads per
// level, reporting the team size at each depth.  Dynamic adjustment is
// disabled so num_threads(2) is honored.
static int omp_nested()
{
    omp_set_dynamic(0);

#pragma omp parallel num_threads(2)
    {
        report_num_threads(1);

#pragma omp parallel num_threads(2)
        {
            report_num_threads(2);

#pragma omp parallel num_threads(2)
            report_num_threads(3);
        }
    }

    return 0;
}
// Entry point: run the simple and nested OpenMP tests, bracketed by
// runtime thread init/deinit.
//
// Fix: removed the commented-out 'goto out;' and the now-unused 'out:'
// label it targeted (dead code that also triggered an unused-label
// warning).  Control flow is unchanged.
int test_omp()
{
    nk_omp_thread_init();

    nk_vc_printf("Starting simple test\n");
    omp_simple();

    nk_vc_printf("Starting nested test\n");
    omp_nested();

    nk_vc_printf("OMP test finished\n");

    nk_omp_thread_deinit();

    return 0;
}
|
GB_unop__sinh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sinh_fc32_fc32)
// op(A') function: GB (_unop_tran__sinh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = csinhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csinhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = csinhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = csinhf (Ax [p]) for the anz entries of A.  When Ab is
// non-NULL, A is bitmap and entries with Ab [p] == 0 are skipped (C->b was
// already copied from A->b by the caller).  NOTE: auto-generated file —
// only comments are added here.
GrB_Info GB (_unop_apply__sinh_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot of Ax holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = csinhf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = csinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Thin wrapper: the transpose-and-apply loop lives in the shared template
// GB_unop_transpose.c, specialized for csinhf on GxB_FC32_t via the GB_*
// macros defined above.  NOTE: auto-generated file — comments only.
GrB_Info GB (_unop_tran__sinh_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
1961.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization: fill A, B, C, D with deterministic values derived
   from their indices so the benchmark output is reproducible.  E, F and G
   are outputs of the kernel and are not touched here. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  int row, col;

  for (row = 0; row < ni; row++)
    for (col = 0; col < nk; col++)
      A[row][col] = ((DATA_TYPE) row*col) / ni;

  for (row = 0; row < nk; row++)
    for (col = 0; col < nj; col++)
      B[row][col] = ((DATA_TYPE) row*(col+1)) / nj;

  for (row = 0; row < nj; row++)
    for (col = 0; col < nm; col++)
      C[row][col] = ((DATA_TYPE) row*(col+3)) / nl;

  for (row = 0; row < nm; row++)
    for (col = 0; col < nl; col++)
      D[row][col] = ((DATA_TYPE) row*(col+2)) / nk;
}
/* DCE code.  Must scan the entire live-out data (prevents dead-code
   elimination of the kernel); can also be used to check the output. */
static
void print_array(int ni, int nl,
		 DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int row, col;

  for (row = 0; row < ni; row++)
    for (col = 0; col < nl; col++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[row][col]);
      if ((row * ni + col) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.

   Computes G := (A*B) * (C*D) via E := A*B, F := C*D, G := E*F.

   Fixes relative to the previous version:
     o num_threads(#P11) was an unsubstituted template placeholder and is
       not valid C; it is removed.
     o '#pragma omp parallel for schedule(static, 2) simd' is malformed
       ('simd' is part of the combined directive name, not a clause and
       must precede the clauses); plain 'parallel for' is used instead.
     o The inner 'parallel for' constructs were nested inside an enclosing
       'parallel' region, so every thread of the outer team redundantly
       re-executed each loop nest with its own nested team.  Each matrix
       product now gets a single (outer-loop) parallel for; results are
       numerically identical.  */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
		DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j, k;

#pragma scop
  /* E := A*B */
#pragma omp parallel for private(j, k) schedule(static, 2)
  for (i = 0; i < _PB_NI; i++)
    for (j = 0; j < _PB_NJ; j++)
      {
	E[i][j] = 0;
	for (k = 0; k < _PB_NK; ++k)
	  E[i][j] += A[i][k] * B[k][j];
      }

  /* F := C*D */
#pragma omp parallel for private(j, k) schedule(static, 2)
  for (i = 0; i < _PB_NJ; i++)
    for (j = 0; j < _PB_NL; j++)
      {
	F[i][j] = 0;
	for (k = 0; k < _PB_NM; ++k)
	  F[i][j] += C[i][k] * D[k][j];
      }

  /* G := E*F.  G depends on E and F; the implicit barrier at the end of
     each parallel for above guarantees both are complete before this
     loop starts. */
#pragma omp parallel for private(j, k) schedule(static, 2)
  for (i = 0; i < _PB_NI; i++)
    for (j = 0; j < _PB_NL; j++)
      {
	G[i][j] = 0;
	for (k = 0; k < _PB_NJ; ++k)
	  G[i][j] += E[i][k] * F[k][j];
      }
#pragma endscop
}
/* Benchmark driver: declare, initialize, time the kernel, print the
   instrumented results, and dump G to prevent dead-code elimination. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  int nl = NL;
  int nm = NM;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
  POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
  POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
  POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
  POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
  /* Initialize array(s).  Only the inputs A, B, C, D are initialized;
     E, F, G are produced by the kernel. */
  init_array (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_3mm (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(E),
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(F),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D),
	      POLYBENCH_ARRAY(G));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(E);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(F);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  POLYBENCH_FREE_ARRAY(G);
  return 0;
}
|
GB_unaryop__lnot_fp64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_int16
// op(A') function: GB_tran__lnot_fp64_int16
// C type: double
// A type: int16_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = !(((double) Ax [p]) != 0) for p = 0..anz-1: each int16
// entry is cast to double, then logically negated, via the GB_CAST_OP
// macro defined above.  NOTE: auto-generated file — comments only.
GrB_Info GB_unop__lnot_fp64_int16
(
    double *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Thin wrapper: the transpose-and-apply loop lives in the shared template
// GB_unaryop_transpose.c (phase 2), specialized for this type pair via the
// GB_* macros defined above.  NOTE: auto-generated file — comments only.
GrB_Info GB_tran__lnot_fp64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
struct.c | // RUN: %libomptarget-compile-generic -fopenmp-extensions
// RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace
// Wrong results on amdgpu
// XFAIL: amdgcn-amd-amdhsa
// XFAIL: amdgcn-amd-amdhsa-newRTL
#include <omp.h>
#include <stdio.h>
#define CHECK_PRESENCE(Var1, Var2, Var3) \
printf(" presence of %s, %s, %s: %d, %d, %d\n", \
#Var1, #Var2, #Var3, \
omp_target_is_present(&(Var1), omp_get_default_device()), \
omp_target_is_present(&(Var2), omp_get_default_device()), \
omp_target_is_present(&(Var3), omp_get_default_device()))
#define CHECK_VALUES(Var1, Var2) \
printf(" values of %s, %s: %d, %d\n", \
#Var1, #Var2, (Var1), (Var2))
int main() {
struct S { int i; int j; } s;
// CHECK: presence of s, s.i, s.j: 0, 0, 0
CHECK_PRESENCE(s, s.i, s.j);
// =======================================================================
// Check that ompx_hold keeps entire struct present.
// -----------------------------------------------------------------------
// CHECK-LABEL: check:{{.*}}
printf("check: ompx_hold only on first member\n");
s.i = 20;
s.j = 30;
#pragma omp target data map(tofrom: s) map(ompx_hold,tofrom: s.i) \
map(tofrom: s.j)
{
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
CHECK_PRESENCE(s, s.i, s.j);
#pragma omp target map(tofrom: s)
{
s.i = 21;
s.j = 31;
}
#pragma omp target exit data map(delete: s, s.i)
// ompx_hold on s.i applies to all of s.
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
// CHECK-NEXT: values of s.i, s.j: 20, 30
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
}
// CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
// CHECK-NEXT: values of s.i, s.j: 21, 31
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
// -----------------------------------------------------------------------
// CHECK-LABEL: check:{{.*}}
printf("check: ompx_hold only on last member\n");
s.i = 20;
s.j = 30;
#pragma omp target data map(tofrom: s) map(tofrom: s.i) \
map(ompx_hold,tofrom: s.j)
{
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
CHECK_PRESENCE(s, s.i, s.j);
#pragma omp target map(tofrom: s)
{
s.i = 21;
s.j = 31;
}
#pragma omp target exit data map(delete: s, s.i)
// ompx_hold on s.j applies to all of s.
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
// CHECK-NEXT: values of s.i, s.j: 20, 30
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
}
// CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
// CHECK-NEXT: values of s.i, s.j: 21, 31
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
// -----------------------------------------------------------------------
// CHECK-LABEL: check:{{.*}}
printf("check: ompx_hold only on struct\n");
s.i = 20;
s.j = 30;
#pragma omp target data map(ompx_hold,tofrom: s) map(tofrom: s.i) \
map(tofrom: s.j)
{
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
CHECK_PRESENCE(s, s.i, s.j);
#pragma omp target map(tofrom: s)
{
s.i = 21;
s.j = 31;
}
#pragma omp target exit data map(delete: s, s.i)
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
// CHECK-NEXT: values of s.i, s.j: 20, 30
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
}
// CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
// CHECK-NEXT: values of s.i, s.j: 21, 31
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
// =======================================================================
// Check that transfer to/from host checks reference count correctly.
// -----------------------------------------------------------------------
// CHECK-LABEL: check:{{.*}}
printf("check: parent DynRefCount=1 is not sufficient for transfer\n");
s.i = 20;
s.j = 30;
#pragma omp target data map(ompx_hold, tofrom: s)
#pragma omp target data map(ompx_hold, tofrom: s)
{
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
CHECK_PRESENCE(s, s.i, s.j);
#pragma omp target map(from: s.i, s.j)
{
s.i = 21;
s.j = 31;
} // No transfer here even though parent's DynRefCount=1.
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
// CHECK-NEXT: values of s.i, s.j: 20, 30
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
#pragma omp target map(to: s.i, s.j)
{ // No transfer here even though parent's DynRefCount=1.
// CHECK-NEXT: values of s.i, s.j: 21, 31
CHECK_VALUES(s.i, s.j);
}
}
// CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
// CHECK-NEXT: values of s.i, s.j: 21, 31
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
// -----------------------------------------------------------------------
// CHECK-LABEL: check:{{.*}}
printf("check: parent HoldRefCount=1 is not sufficient for transfer\n");
s.i = 20;
s.j = 30;
#pragma omp target data map(tofrom: s)
#pragma omp target data map(tofrom: s)
{
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
CHECK_PRESENCE(s, s.i, s.j);
#pragma omp target map(ompx_hold, from: s.i, s.j)
{
s.i = 21;
s.j = 31;
} // No transfer here even though parent's HoldRefCount=1.
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
// CHECK-NEXT: values of s.i, s.j: 20, 30
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
#pragma omp target map(ompx_hold, to: s.i, s.j)
{ // No transfer here even though parent's HoldRefCount=1.
// CHECK-NEXT: values of s.i, s.j: 21, 31
CHECK_VALUES(s.i, s.j);
}
}
// CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
// CHECK-NEXT: values of s.i, s.j: 21, 31
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
// -----------------------------------------------------------------------
// CHECK-LABEL: check:{{.*}}
//
// At the beginning of a region, if the parent's TotalRefCount=1, then the
// transfer should happen.
//
// At the end of a region, it also must be true that the reference count being
// decremented is the reference count that is 1.
printf("check: parent TotalRefCount=1 is not sufficient for transfer\n");
s.i = 20;
s.j = 30;
#pragma omp target data map(ompx_hold, tofrom: s)
{
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
CHECK_PRESENCE(s, s.i, s.j);
#pragma omp target map(ompx_hold, tofrom: s.i, s.j)
{
s.i = 21;
s.j = 31;
}
#pragma omp target exit data map(from: s.i, s.j)
// No transfer here even though parent's TotalRefCount=1.
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1
// CHECK-NEXT: values of s.i, s.j: 20, 30
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
}
// CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0
// CHECK-NEXT: values of s.i, s.j: 21, 31
CHECK_PRESENCE(s, s.i, s.j);
CHECK_VALUES(s.i, s.j);
return 0;
}
|
exchange_boundary.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
void DoBufferCopy(domain_type *domain, int level, int grid_id, int buffer){
  // Copy a dim_i x dim_j x dim_k box of doubles from (read_i,read_j,read_k) in
  // read[] to (write_i,write_j,write_k) in write[].  Each side linearizes its
  // 3D index with its own 'pencil' (j-stride) and 'plane' (k-stride).
  int dim_i = domain->bufferCopies[level][buffer].dim.i;
  int dim_j = domain->bufferCopies[level][buffer].dim.j;
  int dim_k = domain->bufferCopies[level][buffer].dim.k;
  int  read_i = domain->bufferCopies[level][buffer].read.i;
  int  read_j = domain->bufferCopies[level][buffer].read.j;
  int  read_k = domain->bufferCopies[level][buffer].read.k;
  int  read_pencil = domain->bufferCopies[level][buffer].read.pencil;
  int  read_plane  = domain->bufferCopies[level][buffer].read.plane;
  int write_i = domain->bufferCopies[level][buffer].write.i;
  int write_j = domain->bufferCopies[level][buffer].write.j;
  int write_k = domain->bufferCopies[level][buffer].write.k;
  int write_pencil = domain->bufferCopies[level][buffer].write.pencil;
  int write_plane  = domain->bufferCopies[level][buffer].write.plane;
  double * __restrict__  read = domain->bufferCopies[level][buffer].read.ptr;
  double * __restrict__ write = domain->bufferCopies[level][buffer].write.ptr;
  // A box index >= 0 means this endpoint is a subdomain's grid (for grid_id)
  // rather than a pre-assigned pack/unpack buffer, so redirect the pointer.
  if(domain->bufferCopies[level][buffer].read.box >=0) read = domain->subdomains[ domain->bufferCopies[level][buffer].read.box].levels[level].grids[grid_id];
  if(domain->bufferCopies[level][buffer].write.box>=0)write = domain->subdomains[domain->bufferCopies[level][buffer].write.box].levels[level].grids[grid_id];
  // (removed unused outer read_ijk/write_ijk declarations -- each loop body
  //  declares its own; the outer copies were dead and shadowed)
  int i,j,k;
  if(dim_i==1){ // be smart and don't have an inner loop from 0 to 1
    for(k=0;k<dim_k;k++){
    for(j=0;j<dim_j;j++){
      int  read_ijk = ( read_i) + (j+ read_j)* read_pencil + (k+ read_k)* read_plane;
      int write_ijk = (write_i) + (j+write_j)*write_pencil + (k+write_k)*write_plane;
      write[write_ijk] = read[read_ijk];
    }}
  }else if(dim_i==4){ // be smart and don't have an inner loop from 0 to 4 (unrolled)
    for(k=0;k<dim_k;k++){
    for(j=0;j<dim_j;j++){
      int  read_ijk = ( read_i) + (j+ read_j)* read_pencil + (k+ read_k)* read_plane;
      int write_ijk = (write_i) + (j+write_j)*write_pencil + (k+write_k)*write_plane;
      write[write_ijk+0] = read[read_ijk+0];
      write[write_ijk+1] = read[read_ijk+1];
      write[write_ijk+2] = read[read_ijk+2];
      write[write_ijk+3] = read[read_ijk+3];
    }}
  }else{ // general case
    for(k=0;k<dim_k;k++){
    for(j=0;j<dim_j;j++){
    for(i=0;i<dim_i;i++){
      int  read_ijk = (i+ read_i) + (j+ read_j)* read_pencil + (k+ read_k)* read_plane;
      int write_ijk = (i+write_i) + (j+write_j)*write_pencil + (k+write_k)*write_plane;
      write[write_ijk] = read[read_ijk];
    }}}
  }
}
//------------------------------------------------------------------------------------------------------------------------------
// Exchange boundaries by aggregating into domain buffers
//------------------------------------------------------------------------------------------------------------------------------
// Exchange ghost-zone data for grid grid_id at the given level.  Face, edge,
// and corner exchanges can be enabled independently.  With MPI the sequence is:
// prepost Irecv's, pack send buffers, post Isend's, do on-rank copies (to hide
// Isend latency), Waitall, unpack.  Per-phase cycle counts are accumulated
// into domain->cycles.
void exchange_boundary(domain_type *domain, int level, int grid_id, int exchange_faces, int exchange_edges, int exchange_corners){
  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  int buffer=0;
  int sendBox,recvBox,n; // NOTE(review): sendBox/recvBox appear unused in this function
  // Direction tables over the 27 neighbors, indexed n = 13 + di + 3*dj + 9*dk
  // for di,dj,dk in {-1,0,1}: which directions are faces, edges, or corners.
  int    faces[27] = {0,0,0,0,1,0,0,0,0,  0,1,0,1,0,1,0,1,0,  0,0,0,0,1,0,0,0,0};
  int    edges[27] = {0,1,0,1,0,1,0,1,0,  1,0,1,0,0,0,1,0,1,  0,1,0,1,0,1,0,1,0};
  int  corners[27] = {1,0,1,0,0,0,1,0,1,  0,0,0,0,0,0,0,0,0,  1,0,1,0,0,0,1,0,1};
  int exchange[27] = {0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0,  0,0,0,0,0,0,0,0,0};
  // Select which of the 27 directions actually take part in this exchange.
  for(n=0;n<27;n++){
    if(   exchange_faces )exchange[n] |=   faces[n];
    if(   exchange_edges )exchange[n] |=   edges[n];
    if( exchange_corners )exchange[n] |= corners[n];
  }
  #ifdef __MPI
  // there are up to 27 sends and up to 27 recvs
  // packed lists of message info...
  double *  buffers_packed[54];
  int         sizes_packed[54];
  int         ranks_packed[54];
  int          tags_packed[54];
  MPI_Request requests_packed[54];
  MPI_Status    status_packed[54];
  int nMessages=0;
  int sizes_all[27];
  // precompute the possible sizes of each buffer (n.b. not all are necessarily used):
  // a message spans the full local extent along any axis whose offset is 0, and
  // the ghost depth along any axis whose offset is +/-1.
  int di,dj,dk;
  for(dk=-1;dk<=1;dk++){
  for(dj=-1;dj<=1;dj++){
  for(di=-1;di<=1;di++){
    int n = 13+di+3*dj+9*dk;
    sizes_all[n] = 1;
    if(di==0)sizes_all[n]*=domain->subdomains_per_rank_in.i*domain->subdomains[0].levels[level].dim.i;else sizes_all[n]*=domain->subdomains[0].levels[level].ghosts;
    if(dj==0)sizes_all[n]*=domain->subdomains_per_rank_in.j*domain->subdomains[0].levels[level].dim.j;else sizes_all[n]*=domain->subdomains[0].levels[level].ghosts;
    if(dk==0)sizes_all[n]*=domain->subdomains_per_rank_in.k*domain->subdomains[0].levels[level].dim.k;else sizes_all[n]*=domain->subdomains[0].levels[level].ghosts;
  }}}
  // enumerate a packed list of messages... starting with receives...
  // receives walk the directions reversed (26-n) and expect tag n: the neighbor
  // sending toward us from direction 26-n tags with its own direction index n.
  for(n=0;n<27;n++)if(exchange[26-n] && (domain->rank_of_neighbor[26-n] != domain->rank) ){
    buffers_packed[nMessages] = domain->recv_buffer[26-n];
      sizes_packed[nMessages] = sizes_all[26-n];
      ranks_packed[nMessages] = domain->rank_of_neighbor[26-n];
       tags_packed[nMessages] = n;
    nMessages++;
  }
  // enumerate a packed list of messages... continuing with sends...
  // (so entries [0, nMessages/2) are receives and the rest are sends)
  for(n=0;n<27;n++)if(exchange[n] && (domain->rank_of_neighbor[n] != domain->rank) ){
    buffers_packed[nMessages] = domain->send_buffer[n];
      sizes_packed[nMessages] = sizes_all[n];
      ranks_packed[nMessages] = domain->rank_of_neighbor[n];
       tags_packed[nMessages] = n;
    nMessages++;
  }
  // loop through packed list of MPI receives and prepost Irecv's...
  _timeStart = CycleTime();
  #ifdef __MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<nMessages/2;n++){
    MPI_Irecv(buffers_packed[n],sizes_packed[n],MPI_DOUBLE,ranks_packed[n],tags_packed[n],MPI_COMM_WORLD,&requests_packed[n]);
  }
  _timeEnd = CycleTime();
  domain->cycles.recv[level] += (_timeEnd-_timeStart);
  // pack MPI send buffers (copy subdomain ghost data into send_buffer[])...
  _timeStart = CycleTime();
  #pragma omp parallel for schedule(static,1)
  for(buffer=domain->bufferCopy_Pack_Start;buffer<domain->bufferCopy_Pack_End;buffer++){
    if( (domain->bufferCopies[level][buffer].isFace   && exchange_faces  ) ||
        (domain->bufferCopies[level][buffer].isEdge   && exchange_edges  ) ||
        (domain->bufferCopies[level][buffer].isCorner && exchange_corners) ){
      DoBufferCopy(domain,level,grid_id,buffer);
  }}
  _timeEnd = CycleTime();
  domain->cycles.pack[level] += (_timeEnd-_timeStart);
  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef __MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=nMessages/2;n<nMessages;n++){
    MPI_Isend(buffers_packed[n],sizes_packed[n],MPI_DOUBLE,ranks_packed[n],tags_packed[n],MPI_COMM_WORLD,&requests_packed[n]);
  }
  _timeEnd = CycleTime();
  domain->cycles.send[level] += (_timeEnd-_timeStart);
  #endif
  // exchange locally... try and hide within Isend latency...
  _timeStart = CycleTime();
  #pragma omp parallel for schedule(static,1)
  for(buffer=domain->bufferCopy_Local_Start;buffer<domain->bufferCopy_Local_End;buffer++){
    if( (domain->bufferCopies[level][buffer].isFace   && exchange_faces  ) ||
        (domain->bufferCopies[level][buffer].isEdge   && exchange_edges  ) ||
        (domain->bufferCopies[level][buffer].isCorner && exchange_corners) ){
      DoBufferCopy(domain,level,grid_id,buffer);
  }}
  _timeEnd = CycleTime();
  domain->cycles.grid2grid[level] += (_timeEnd-_timeStart);
  // wait for MPI to finish (both receives and sends)...
  #ifdef __MPI
  _timeStart = CycleTime();
  MPI_Waitall(nMessages,requests_packed,status_packed);
  _timeEnd = CycleTime();
  domain->cycles.wait[level] += (_timeEnd-_timeStart);
  // unpack MPI receive buffers (copy recv_buffer[] into subdomain ghost zones)
  _timeStart = CycleTime();
  #pragma omp parallel for schedule(static,1)
  for(buffer=domain->bufferCopy_Unpack_Start;buffer<domain->bufferCopy_Unpack_End;buffer++){
    if( (domain->bufferCopies[level][buffer].isFace   && exchange_faces  ) ||
        (domain->bufferCopies[level][buffer].isEdge   && exchange_edges  ) ||
        (domain->bufferCopies[level][buffer].isCorner && exchange_corners) ){
      DoBufferCopy(domain,level,grid_id,buffer);
  }}
  _timeEnd = CycleTime();
  domain->cycles.unpack[level] += (_timeEnd-_timeStart);
  #endif
  domain->cycles.communication[level] += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
gaussian_elimination-con-main.c | #define TYPE unsigned long
#define N_BITS 64
#define MAX_TYPE 18446744073709551615UL
//#include "../include/gaussian_elimination.h"
#include <stdio.h>
#include <stdlib.h>
#include "matrix.h"
#include <omp.h>
#include "sieve.h"
#include "vector.h"
#include <gmp.h>
/* Funzioni per realizzare l'eliminazione gaussiana in modulo 2
* La matrice di bit degli espondenti modulo 2 sara' organizzata
* nel seguente modo:
*
* N_BITS N_BITS N_BITS X = eventuale padding
* 1) [000 ... 001] [000 ... 001] ... [000 ... 0XX]
* 2) [000 ... 000] [000 ... 010] ... [000 ... 0XX]
* ... ...
* K) [010 ... 101] [010 ... 101] ... [000 ... 0XX] */
/* Tipo di dato che determina la dimensione del blocco di bit */
typedef TYPE word;
struct row_stats {
// bit piu a destra
long unsigned b_dx;
// num di bit a 1
long unsigned n_bit;
};
/****************/
/*******************************************************/
/* Print the N_BITS bits of a word, most significant bit first. */
void print_bits(word a) {
    unsigned int bits[N_BITS];
    /* bits[i] holds bit i (bits[0] = LSB) */
    for(unsigned int i = 0; i < N_BITS; ++i)
        bits[i] = (a >> i) & 1U;
    /* print from the MSB down; use N_BITS-1 instead of the previous
     * hard-coded 63 so the function stays correct if the word width changes */
    for(int i = N_BITS - 1; i >= 0; --i)
        printf("%d", bits[i]);
}
/* Dump the packed bit matrix M (righe rows, blocchi word-blocks per row),
 * one row per line with a blank between consecutive blocks. */
void print_all(unsigned long **M, int righe, int blocchi){
    for(int r = 0; r < righe; ++r) {
        for(int b = 0; b < blocchi; ++b) {
            print_bits(get_matrix_l(M, r, b));
            printf(" ");
        }
        printf("\n");
    }
}
/* Print an r x c matrix of unsigned ints, comma-separated, one row per line. */
void print_M(unsigned int ** M, int r, int c) {
    int row = 0;
    while (row < r) {
        for (int col = 0; col < c; ++col)
            printf("%u, ", get_matrix(M, row, col));
        printf("\n");
        ++row;
    }
}
/* Print an r x c matrix of unsigned ints, one row per line, each row
 * prefixed with its index ("i: v0, v1, ..."). */
void print_M_con_i(unsigned int ** M, int r, int c) {
    for (int row = 0; row < r; ++row) {
        printf("%d: ", row);
        int col = 0;
        while (col < c) {
            printf("%u, ", get_matrix(M, row, col));
            ++col;
        }
        printf("\n");
    }
}
/* Print the parity (value mod 2) of every entry of an r x c matrix,
 * one row per line with no separators between digits. */
void print_M_2(unsigned int ** M, int r, int c) {
    int row, col;
    for (row = 0; row < r; ++row) {
        for (col = 0; col < c; ++col)
            printf("%u", get_matrix(M, row, col) % 2);
        printf("\n");
    }
}
/****************/
/* Modular multiplication: a = (b * c) mod n.
 * The result is reduced into a; aliasing a with b or c is fine (GMP allows it). */
void modular_multiplication(mpz_t a, mpz_t b, mpz_t c, mpz_t n) {
    mpz_mul (a, b, c);  /* a = b * c (full product) */
    mpz_mod (a, a, n);  /* reduce into [0, n) */
}
/* Return bit i of row k of the packed matrix M.  Bits are stored MSB-first
 * within each N_BITS-wide block, so column i lives in block i/N_BITS at
 * shift N_BITS-1 - (i mod N_BITS). */
unsigned get_k_i(word ** M, unsigned long k,
                 unsigned long i) {
    unsigned long block = i / N_BITS;
    unsigned long shift = N_BITS - 1 - (i % N_BITS);
    word w = get_matrix_l(M, k, block);
    return (unsigned)((w >> shift) & 1);
}
/* Set bit i of row k of the packed matrix M to (value mod 2).
 * NOTE: the bit is OR-ed in, so this can raise a 0 bit to 1 but can never
 * clear an already-set bit back to 0 (callers here only write into zeroed
 * rows, where OR behaves like assignment).
 * (Removed the dead commented-out debug printing that cluttered the body.) */
void set_k_i(word ** M, unsigned long k,
             unsigned long i, unsigned int value) {
    unsigned long I = i / N_BITS;                       /* block index */
    unsigned long n_shift = N_BITS - ((i % N_BITS ) + 1); /* MSB-first bit position */
    word b = get_matrix_l(M, k, I);
    b = b | (((unsigned long) value % 2UL) << n_shift);
    set_matrix_l(M, k, I, b);
}
/* Row update v(k) = v(k) + v(j) over Z/2Z.  Addition mod 2 is XOR, applied
 * block-by-block across the n_blocks words of each packed row. */
void add_vector_z2(word ** M, unsigned long k,
                   unsigned long j, unsigned long n_blocks) {
    unsigned long blk = 0;
    while (blk < n_blocks) {
        word mixed = get_matrix_l(M, k, blk) ^ get_matrix_l(M, j, blk);
        set_matrix_l(M, k, blk, mixed);
        ++blk;
    }
}
/* Row update v(k) = v(k) + v(j) over the integers, column by column. */
void add_vector_z(unsigned int ** M, unsigned long k,
                  unsigned long j, unsigned long n_col) {
    unsigned long col;
    for (col = 0; col < n_col; ++col) {
        unsigned int total = get_matrix(M, j, col) + get_matrix(M, k, col);
        set_matrix(M, k, col, total);
    }
}
/* Fill *wt with stats for row k of the packed matrix M:
 *   b_dx  = column index of the first (leading) 1 bit, or n_col if the row
 *           is all zero;
 *   n_bit = total number of 1 bits in the row's n_col logical columns. */
void get_wt_k(word ** M, unsigned long k, unsigned long n_col,
              struct row_stats * wt) {
    /* Defaults describe an all-zero row. */
    wt->b_dx = n_col;
    wt->n_bit = 0;
    /* Scan for the first set bit.  The bound must be tested BEFORE reading
     * the bit: the old order (get_k_i(...) == 0 && i < n_col) read one column
     * past the end -- and potentially one block past the allocation -- on an
     * all-zero row. */
    unsigned long i = 0;
    while (i < n_col && get_k_i(M, k, i) == 0)
        ++i;
    if (i >= n_col)
        return; /* no 1 bits at all */
    wt->b_dx = i;
    /* Count the remaining 1 bits from the leading bit onward. */
    for (; i < n_col; ++i)
        if (get_k_i(M, k, i))
            wt->n_bit++;
}
/* Gaussian elimination over Z/2Z on the packed bit matrix M_z2, with the same
 * row operations mirrored on the integer exponent matrix M_z and on the Q(A)
 * values: whenever pivot row j is added (mod 2) into row k, the integer
 * exponent rows are added too and Q_A[k] is multiplied by Q_A[j] mod N.  This
 * keeps the invariant that row k always describes the product of the original
 * relations that were combined into it.  wt[] (leading-bit / popcount stats,
 * see get_wt_k) is consulted to pick pivots and refreshed after each update. */
void gaussian_elimination_mod_2(unsigned int ** M_z,
                                word ** M_z2,
                                mpz_t * Q_A,
                                mpz_t N,
                                unsigned long n_row,
                                unsigned long n_col,
                                unsigned long n_blocks,
                                struct row_stats wt[]) {
    for(unsigned long i = 0; i < n_col; ++i) {
        /* Find the pivot: the first row whose leading 1 sits in column i.
         * If none exists, j ends at n_row and the loop below does nothing. */
        unsigned long j;
        for(j = 0; j < n_row && wt[j].b_dx != i; ++j)
            ; /* just advance j */
        /* Eliminate column i from every later row that has the bit set. */
        for(unsigned k = j + 1; k < n_row; ++k) {
            if(get_k_i(M_z2, k, i)) { /* bit v(k)(i) must be 1 */
                add_vector_z2(M_z2, k, j, n_blocks);               /* v(k) += v(j) mod 2 */
                add_vector_z(M_z, k, j, n_col);                    /* v(k) += v(j) over Z */
                modular_multiplication(Q_A[k], Q_A[k], Q_A[j], N); /* Q(Ak) = Q(Ak)*Q(Aj) mod N */
                get_wt_k(M_z2, k, n_col, & wt[k]);                 /* refresh row stats */
            }
        }
    }
}
/* Return 1 if row k of the packed mod-2 matrix M_z2 is all zero across its
 * n_col logical columns, ignoring any padding bits in the final block.
 * (Removed the stray debug print_bits/printf that unconditionally polluted
 * stdout from what should be a pure predicate.) */
int row_is_null(word ** M_z2, unsigned long k, unsigned long n_col,
                unsigned long n_blocks) {
    /* Every block except the last must be exactly zero. */
    for(unsigned long i = 0; i < n_blocks-1; ++i) {
        if(get_matrix_l(M_z2, k, i) != 0)
            return 0;
    }
    /* Mask covering only the used (non-padding) bits of the last block:
     *   columns: 01010...110XXX...X   (X = padding)
     *   mask:    11111...111000...0
     * When n_col is an exact multiple of N_BITS the last block has no padding;
     * the old code computed MAX_TYPE << N_BITS there, which is undefined
     * behavior (shift by the full word width), so that case is now explicit. */
    unsigned long used = n_col % N_BITS;
    word mask = (used == 0) ? (word)MAX_TYPE : (word)(MAX_TYPE << (N_BITS - used));
    if((get_matrix_l(M_z2, k, n_blocks-1) & mask) != 0)
        return 0;
    return 1;
}
/* For every row i whose mod-2 exponent vector is zero (wt[i].n_bit == 0) we
 * have a congruence of squares X^2 = Y^2 (mod N):
 *   X = Q_a[i]   (the product of the combined (A+s) values)
 *   Y = prod_j factor_base[j]^(M_z[i][j]/2) mod N   (integer exponents halved)
 * gcd(X + Y, N) then yields a factor of N; non-trivial ones are printed with
 * the cofactor.  Output format is unchanged from the original. */
void congruence_relation(mpz_t N,                 // number to factor
                         unsigned int * factor_base,
                         word ** M_z2,            // exponents mod 2 (unused here)
                         unsigned int ** M_z,     // integer exponents
                         mpz_t * Q_a,             // Q(Ai) values
                         struct row_stats * wt,   // per-row bit statistics
                         unsigned long n_row,
                         unsigned long n_primes) {
    mpz_t mpz_temp;
    mpz_init(mpz_temp);
    mpz_t mpz_prime;
    mpz_init(mpz_prime);
    mpz_t X;
    mpz_init(X);
    mpz_t Y;
    mpz_init(Y);
    mpz_t m;
    mpz_init(m);
    mpz_t q;
    mpz_init(q);
    unsigned int exp;
    for(unsigned long i = 0; i < n_row; ++i)
        if(wt[i].n_bit == 0) { // dependency found: row i is zero mod 2
            mpz_set_ui(Y, 1);
            /* unsigned long loop index avoids the old signed/unsigned
             * comparison between int j and n_primes */
            for(unsigned long j = 0; j < n_primes; ++j) {
                mpz_set_ui(mpz_prime, factor_base[j]);
                exp = get_matrix(M_z, i, j) / 2;     /* halved exponent */
                /* temp = factor_base[j]^exp mod N */
                mpz_powm_ui(mpz_temp, mpz_prime, exp, N);
                /* Y = Y * temp mod N */
                modular_multiplication(Y, Y, mpz_temp, N);
            }
            mpz_set(X, Q_a[i]);
            gmp_printf("mcd(%Zd + %Zd, %Zd) = ", X, Y, N);
            mpz_add(X, X, Y);  // X = X + Y
            mpz_gcd(m, X, N);  // m = gcd(X + Y, N)
            gmp_printf("%Zd", m);
            mpz_divexact(q, N, m); // exact: m divides N since m = gcd(.., N)
            if(mpz_cmp(m, N) < 0 && mpz_cmp_ui(m, 1) > 0) { // non-trivial factor
                gmp_printf(", N = %Zd * %Zd\n", m, q);
            }
            else
                printf("\n");
        }
    /* Release the GMP temporaries.  The variadic mpz_clears list must be
     * NULL-terminated -- the original commented-out call lacked the NULL,
     * and leaving it out leaked all six mpz_t values. */
    mpz_clears(mpz_temp, mpz_prime, X, Y, m, q, NULL);
}
/*****************************************************/
/* Driver: factor N = 8616460799 with a (partly hard-coded) quadratic sieve.
 * Pipeline: sieve relations -> pack exponents mod 2 into a bit matrix ->
 * Gaussian elimination over Z/2 -> derive congruences of squares -> gcd.
 * Timing for the set-up and elimination phases is printed at the end. */
int main() {
    word ** M;            /* exponent matrix packed mod 2 (bit matrix) */
    unsigned int ** M_z;  /* NOTE(review): declared but never used below */
    unsigned long n_primes = 15;
    unsigned long n_blocchi = 1; /* word-blocks per row; one 64-bit block covers 15 columns */
    double t1, t2;
    unsigned int poly_val_num = 12800; /* number of polynomial values to sieve */
    mpz_t N;
    mpz_init(N);
    mpz_set_str(N, "8616460799", 10); /* the number to factor */
    unsigned int factor_base[15] = {2, 5, 7, 11, 17, 23, 37, 47, 59, 67, 71, 83, 89, 97, 101};
    /* Hard-coded sieve start offsets (presumably the square roots of N modulo
     * each factor-base prime -- TODO confirm against the sieve implementation). */
    pair solutions[15];
    unsigned c = 0;
    solutions[c].sol1 = 1;
    solutions[c++].sol2 = 1;
    solutions[c].sol1 = 3;
    solutions[c++].sol2 = 4;
    solutions[c].sol1 = 6;
    solutions[c++].sol2 = 0;
    solutions[c].sol1 = 6;
    solutions[c++].sol2 = 4;
    solutions[c].sol1 = 15;
    solutions[c++].sol2 = 11;
    solutions[c].sol1 = 7;
    solutions[c++].sol2 = 1;
    solutions[c].sol1 = 21;
    solutions[c++].sol2 = 34;
    solutions[c].sol1 = 15;
    solutions[c++].sol2 = 34;
    solutions[c].sol1 = 9;
    solutions[c++].sol2 = 16;
    solutions[c].sol1 = 51;
    solutions[c++].sol2 = 25;
    solutions[c].sol1 = 69;
    solutions[c++].sol2 = 19;
    solutions[c].sol1 = 68;
    solutions[c++].sol2 = 38;
    solutions[c].sol1 = 8;
    solutions[c++].sol2 = 87;
    solutions[c].sol1 = 52;
    solutions[c++].sol2 = 55;
    solutions[c].sol1 = 34;
    solutions[c++].sol2 = 57;
    /* ---- set-up phase (timed): sieve and build the bit matrix ---- */
    t1 = omp_get_wtime();
    unsigned int ** exponents;
    init_matrix(& exponents, poly_val_num, n_primes);
    for(int i = 0; i < poly_val_num; ++i)
        for(int j = 0; j < n_primes; ++j)
            set_matrix(exponents, i, j, 0);
    /* NOTE(review): prints the full 12800x15 zero matrix -- looks like debug output */
    print_M(exponents, poly_val_num, n_primes);
    mpz_t * Q_A;
    init_vector_mpz(& Q_A, poly_val_num);
    unsigned int n_fatt;
    /* Sieve: fills 'exponents' and Q_A, returns the number of smooth relations. */
    n_fatt = sieve(N, factor_base, n_primes, solutions, exponents, Q_A, poly_val_num);
    init_matrix_l(& M, n_fatt, n_blocchi);
    /* Zero the packed bit matrix... */
    for(int i = 0; i < n_fatt; ++i)
        for(int j = 0; j < n_primes; ++j) {
            set_k_i(M, i, j, 0);
        }
    /* ...then OR in each exponent mod 2 (set_k_i only sets bits, so the
     * explicit zeroing pass above is required first). */
    for(int i = 0; i < n_fatt; ++i)
        for(int j = 0; j < n_primes; ++j) {
            unsigned int a = get_matrix(exponents, i, j);
            set_k_i(M, i, j, a);
        }
    struct row_stats * wt = malloc(sizeof(struct row_stats) * n_fatt);
    /* NOTE(review): omp_get_num_threads() outside a parallel region returns 1,
     * so chunck == n_fatt; only meaningful if the pragma below is re-enabled. */
    int n_threads = omp_get_num_threads();
    int chunck = n_fatt/n_threads;
    //#pragma omp parallel for schedule(dynamic, chunck)
    for(int i = 0; i < n_fatt; ++i)
        get_wt_k(M, i, n_primes, & wt[i]);
    t2 = omp_get_wtime();
    double t_set_up = t2 - t1;
    /* ---- elimination phase (timed) ---- */
    t1 = omp_get_wtime();
    gaussian_elimination_mod_2(exponents, M, Q_A, N, n_fatt, n_primes, n_blocchi, wt);
    t2 = omp_get_wtime();
    double t_gauss = t2 - t1;
    mpz_t temp;
    mpz_init(temp); /* NOTE(review): temp is initialized but never used */
    /* Turn the zero rows produced by elimination into factor candidates. */
    congruence_relation(N, factor_base, M, exponents, Q_A, wt, n_fatt, n_primes);
    printf("#time_gauss time_set_up time_totale\n");
    printf("%.6f ", t_gauss);
    printf("%.6f ", t_set_up);
    printf("%.6f\n", t_gauss + t_set_up);
}
|
elect_energy_avx2.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <immintrin.h>
/* gcc -o evec1 elect_energy_vec_01.c -O4 -lm -fopenmp -march=native */
/* Compute the total electrostatic (Coulomb-like) energy of a size^3 cubic
 * lattice of identical point charges with AVX2/FMA intrinsics, 8 charges per
 * __m256 lane group.  Each unique pair is counted once: the diagonal 8x8
 * blocks keep only their strict upper triangle (via bit masks) and only
 * blocks j > i are visited.  Fixes vs. the original: checked allocations,
 * %ld for the long v_count (the old %i was a UB format mismatch), a double
 * nanosecond accumulator, and the buffers are freed. */
int main(int argc, char **argv) {
  struct timespec ts_start, ts_end;
  double time_total;                 /* elapsed ns; double avoids float rounding */
  int i,j,m;
  int size = 60;
  int n_charges = size*size*size;    /* 216000 charges on the lattice */
  float scale=0.5;                   /* lattice spacing */
  __m256 *X, *Y, *Z, *Q;             /* packed positions and charges */
  __m256 tmpQ[8], tmpX[8], tmpY[8], tmpZ[8];

  /* We need an extra block of 8 floats when n_charges is not a multiple of 8 */
  X = aligned_alloc(32, (n_charges+8) * sizeof(float));
  Y = aligned_alloc(32, (n_charges+8) * sizeof(float));
  Z = aligned_alloc(32, (n_charges+8) * sizeof(float));
  Q = aligned_alloc(32, (n_charges+8) * sizeof(float));
  if (!X || !Y || !Z || !Q) {        /* aligned_alloc can return NULL */
    fprintf(stderr, "allocation failed\n");
    return 1;
  }

  /* Initialize X,Y,Z,Q arrays with 256-bit long vectors */
  int ix,iy,iz;
  float tmp_vec[4][8] __attribute__ ((aligned (32)));
  long v_element_count = 0;          /* lane within the current vector (0..7) */
  long v_count = 0;                  /* number of packed __m256 vectors */
  for (ix=0; ix<size; ix++)
  for (iy=0; iy<size; iy++)
  for (iz=0; iz<size; iz++)
  {
    tmp_vec[0][v_element_count] = ix*scale;
    tmp_vec[1][v_element_count] = iy*scale;
    tmp_vec[2][v_element_count] = iz*scale;
    tmp_vec[3][v_element_count] = 0.33;  /* every charge has the same magnitude */
    v_element_count++;
    /* when 8 elements are computed pack them into __m256 vectors */
    if ( v_element_count == 8 ) {
      X[v_count] = _mm256_set_ps( \
          tmp_vec[0][7],tmp_vec[0][6],tmp_vec[0][5],tmp_vec[0][4], \
          tmp_vec[0][3],tmp_vec[0][2],tmp_vec[0][1],tmp_vec[0][0]);
      Y[v_count] = _mm256_set_ps( \
          tmp_vec[1][7],tmp_vec[1][6],tmp_vec[1][5],tmp_vec[1][4], \
          tmp_vec[1][3],tmp_vec[1][2],tmp_vec[1][1],tmp_vec[1][0]);
      Z[v_count] = _mm256_set_ps( \
          tmp_vec[2][7],tmp_vec[2][6],tmp_vec[2][5],tmp_vec[2][4], \
          tmp_vec[2][3],tmp_vec[2][2],tmp_vec[2][1],tmp_vec[2][0]);
      Q[v_count] = _mm256_set_ps( \
          tmp_vec[3][7],tmp_vec[3][6],tmp_vec[3][5],tmp_vec[3][4], \
          tmp_vec[3][3],tmp_vec[3][2],tmp_vec[3][1],tmp_vec[3][0]);
      v_count++;
      v_element_count=0;
      memset(tmp_vec,0,32*sizeof(float));
    }
  }
  /* Treat the remainder. The last vector is padded with zeros.
   * NOTE(review): a zero-padded lane sits at (0,0,0), coinciding with the
   * charge at the origin; its rsqrt is inf and 0*inf = NaN in the fmadd path.
   * Harmless here because size=60 gives no remainder -- verify before using
   * sizes whose cube is not a multiple of 8. */
  if ( v_element_count !=0 ) {
    X[v_count] = _mm256_set_ps( \
        tmp_vec[0][7],tmp_vec[0][6],tmp_vec[0][5],tmp_vec[0][4], \
        tmp_vec[0][3],tmp_vec[0][2],tmp_vec[0][1],tmp_vec[0][0]);
    Y[v_count] = _mm256_set_ps( \
        tmp_vec[1][7],tmp_vec[1][6],tmp_vec[1][5],tmp_vec[1][4], \
        tmp_vec[1][3],tmp_vec[1][2],tmp_vec[1][1],tmp_vec[1][0]);
    Z[v_count] = _mm256_set_ps( \
        tmp_vec[2][7],tmp_vec[2][6],tmp_vec[2][5],tmp_vec[2][4], \
        tmp_vec[2][3],tmp_vec[2][2],tmp_vec[2][1],tmp_vec[2][0]);
    Q[v_count] = _mm256_set_ps( \
        tmp_vec[3][7],tmp_vec[3][6],tmp_vec[3][5],tmp_vec[3][4], \
        tmp_vec[3][3],tmp_vec[3][2],tmp_vec[3][1],tmp_vec[3][0]);
    v_count++;
  }

  double VC=0;                       /* double-precision energy accumulator */
  __m256 r_vec, result, vcps;
  __m256 diff[8], mask[8];
  float tmp_add[8] __attribute__ ((aligned (32)));
  /* mask[m] keeps only lanes m+1..7 -> strict upper triangle of a diagonal
   * 8x8 block; mask[7] is all zero (lane 7 has no partner above it). */
  mask[0]=(__m256)_mm256_set_epi32(-1,-1,-1,-1,-1,-1,-1, 0);
  mask[1]=(__m256)_mm256_set_epi32(-1,-1,-1,-1,-1,-1, 0, 0);
  mask[2]=(__m256)_mm256_set_epi32(-1,-1,-1,-1,-1, 0, 0, 0);
  mask[3]=(__m256)_mm256_set_epi32(-1,-1,-1,-1, 0, 0, 0, 0);
  mask[4]=(__m256)_mm256_set_epi32(-1,-1,-1, 0, 0, 0, 0, 0);
  mask[5]=(__m256)_mm256_set_epi32(-1,-1, 0, 0, 0, 0, 0, 0);
  mask[6]=(__m256)_mm256_set_epi32(-1, 0, 0, 0, 0, 0, 0, 0);
  mask[7]=(__m256)_mm256_set_epi32( 0, 0, 0, 0, 0, 0, 0, 0);

  clock_gettime(CLOCK_MONOTONIC, &ts_start);
  #pragma omp parallel for private(tmpQ,tmpX,tmpY,tmpZ,i,j,m,diff,r_vec,vcps,tmp_add,result) reduction(+:VC) schedule(dynamic)
  for(i=0; i<v_count; i++) {
    /* Broadcast each of the 8 charges of vector i across a full register. */
    tmpQ[0] = _mm256_broadcast_ss(&Q[i][0]);
    tmpQ[1] = _mm256_broadcast_ss(&Q[i][1]);
    tmpQ[2] = _mm256_broadcast_ss(&Q[i][2]);
    tmpQ[3] = _mm256_broadcast_ss(&Q[i][3]);
    tmpQ[4] = _mm256_broadcast_ss(&Q[i][4]);
    tmpQ[5] = _mm256_broadcast_ss(&Q[i][5]);
    tmpQ[6] = _mm256_broadcast_ss(&Q[i][6]);
    tmpQ[7] = _mm256_broadcast_ss(&Q[i][7]);
    tmpX[0] = _mm256_broadcast_ss(&X[i][0]);
    tmpX[1] = _mm256_broadcast_ss(&X[i][1]);
    tmpX[2] = _mm256_broadcast_ss(&X[i][2]);
    tmpX[3] = _mm256_broadcast_ss(&X[i][3]);
    tmpX[4] = _mm256_broadcast_ss(&X[i][4]);
    tmpX[5] = _mm256_broadcast_ss(&X[i][5]);
    tmpX[6] = _mm256_broadcast_ss(&X[i][6]);
    tmpX[7] = _mm256_broadcast_ss(&X[i][7]);
    tmpY[0] = _mm256_broadcast_ss(&Y[i][0]);
    tmpY[1] = _mm256_broadcast_ss(&Y[i][1]);
    tmpY[2] = _mm256_broadcast_ss(&Y[i][2]);
    tmpY[3] = _mm256_broadcast_ss(&Y[i][3]);
    tmpY[4] = _mm256_broadcast_ss(&Y[i][4]);
    tmpY[5] = _mm256_broadcast_ss(&Y[i][5]);
    tmpY[6] = _mm256_broadcast_ss(&Y[i][6]);
    tmpY[7] = _mm256_broadcast_ss(&Y[i][7]);
    tmpZ[0] = _mm256_broadcast_ss(&Z[i][0]);
    tmpZ[1] = _mm256_broadcast_ss(&Z[i][1]);
    tmpZ[2] = _mm256_broadcast_ss(&Z[i][2]);
    tmpZ[3] = _mm256_broadcast_ss(&Z[i][3]);
    tmpZ[4] = _mm256_broadcast_ss(&Z[i][4]);
    tmpZ[5] = _mm256_broadcast_ss(&Z[i][5]);
    tmpZ[6] = _mm256_broadcast_ss(&Z[i][6]);
    tmpZ[7] = _mm256_broadcast_ss(&Z[i][7]);
    /* Accumulate coupling between all lower triangular elements of the diagonal 8x8 blocks */
    vcps = _mm256_setzero_ps();
    for(m=0; m<8; m++)
    {
      /* dx,dy,dz */
      diff[0] = _mm256_sub_ps(tmpX[m],X[i]);
      diff[1] = _mm256_sub_ps(tmpY[m],Y[i]);
      diff[2] = _mm256_sub_ps(tmpZ[m],Z[i]);
      /* dx*dx + dy*dy + dz*dz */
      r_vec = _mm256_fmadd_ps(diff[0],diff[0],_mm256_setzero_ps());
      r_vec = _mm256_fmadd_ps(diff[1],diff[1],r_vec);
      r_vec = _mm256_fmadd_ps(diff[2],diff[2],r_vec);
      /* distance^-1 (approximate reciprocal square root) */
      r_vec = _mm256_rsqrt_ps(r_vec);
      /* Q[m]*Q[i]*distance^-1; the bitwise mask also zeroes the inf produced
       * by the self-pair (distance 0) before it can reach the accumulator */
      result = _mm256_mul_ps(tmpQ[m],Q[i]);
      result = _mm256_mul_ps(result,r_vec);
      result = _mm256_and_ps(mask[m],result);
      vcps   = _mm256_add_ps(vcps,result);
    }
    /* transfer vcps to the double precision accumulator VC */
    _mm256_store_ps(tmp_add,vcps);
    VC += tmp_add[0] + tmp_add[1] + tmp_add[2] + tmp_add[3] + tmp_add[4] + tmp_add[5] + tmp_add[6] + tmp_add[7];
    /* Accumulate coupling between all elements of the off-diagonal blocks j>i */
    for(j=i+1 ; j<v_count; j++)
    {
      vcps = _mm256_setzero_ps();
      for(m=0; m<8; m++)
      {
        diff[0] = _mm256_sub_ps(tmpX[m],X[j]);
        diff[1] = _mm256_sub_ps(tmpY[m],Y[j]);
        diff[2] = _mm256_sub_ps(tmpZ[m],Z[j]);
        r_vec = _mm256_fmadd_ps(diff[0],diff[0],_mm256_setzero_ps());
        r_vec = _mm256_fmadd_ps(diff[1],diff[1],r_vec);
        r_vec = _mm256_fmadd_ps(diff[2],diff[2],r_vec);
        r_vec = _mm256_rsqrt_ps(r_vec);
        result = _mm256_mul_ps(tmpQ[m],Q[j]);
        vcps   = _mm256_fmadd_ps(result,r_vec,vcps);
      }
      _mm256_store_ps(tmp_add,vcps);
      VC += tmp_add[0] + tmp_add[1] + tmp_add[2] + tmp_add[3] + tmp_add[4] + tmp_add[5] + tmp_add[6] + tmp_add[7];
    }
  }
  clock_gettime(CLOCK_MONOTONIC, &ts_end);
  time_total = (ts_end.tv_sec - ts_start.tv_sec)*1e9 + (ts_end.tv_nsec - ts_start.tv_nsec);

  printf("\nTotal time is %f ms, Energy is %f\n", time_total/1e6,VC);
  printf("%ld\n", v_count);   /* v_count is long: %ld (old %i was UB) */

  free(X);
  free(Y);
  free(Z);
  free(Q);
  return 0;
}
|
Tanh.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Tanh.c"
#else
/* Forward pass: output = tanh(input), element-wise, delegated entirely to the
 * TH tensor backend (which also handles resizing output).  'state' is unused. */
void THNN_(Tanh_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output)
{
  THTensor_(tanh)(output, input);
}
/* Backward pass for tanh: gradInput = gradOutput * (1 - output^2),
 * computed from the saved forward output (tanh'(x) = 1 - tanh(x)^2), so
 * 'input' is not read.  A generic strided path handles non-contiguous
 * tensors; contiguous multi-dim tensors take the flat OpenMP loop. */
void THNN_(Tanh_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *output)
{
  THNN_CHECK_SHAPE(output, gradOutput);
  THTensor_(resizeAs)(gradInput, output);

  if (output->nDimension == 1 ||
      !THTensor_(isContiguous)(output) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    /* Generic element-wise application over possibly strided storage. */
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output,
      real z = *output_data; \
      *gradInput_data = *gradOutput_data * (1. - z*z);
    );
  }
  else
  {
    real* ptr_gradOutput = THTensor_(data)(gradOutput);
    real* ptr_gradInput  = THTensor_(data)(gradInput);
    real* ptr_output     = THTensor_(data)(output);
    /* Hoist the element count out of the loop condition so it is computed
     * once rather than re-evaluated every iteration (and the OpenMP loop
     * bound is a plain loop-invariant variable). */
    int64_t n = THTensor_(nElement)(gradInput);
    int64_t i;
#pragma omp parallel for private(i)
    for (i = 0; i < n; i++)
    {
      real z = ptr_output[i];
      ptr_gradInput[i] = ptr_gradOutput[i] * (1. - z*z);
    }
  }
}
#endif
|
par_2s_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildModExtInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModPartialExtInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
hypre_ParCSRCommHandle *comm_handle = NULL;
hypre_ParCSRCommPkg *comm_pkg = NULL;
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt total_global_cpts;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* Intermediate matrices */
hypre_ParCSRMatrix *As_FF, *As_FC, *W;
HYPRE_Real *D_q, *D_w;
HYPRE_Real *D_q_offd = NULL;
hypre_CSRMatrix *As_FF_diag;
hypre_CSRMatrix *As_FF_offd;
hypre_CSRMatrix *As_FC_diag;
hypre_CSRMatrix *As_FC_offd;
hypre_CSRMatrix *W_diag;
hypre_CSRMatrix *W_offd;
HYPRE_Int *As_FF_diag_i;
HYPRE_Int *As_FF_diag_j;
HYPRE_Int *As_FF_offd_i;
HYPRE_Int *As_FF_offd_j;
HYPRE_Int *As_FC_diag_i;
HYPRE_Int *As_FC_offd_i;
HYPRE_Int *W_diag_i;
HYPRE_Int *W_offd_i;
HYPRE_Int *W_diag_j;
HYPRE_Int *W_offd_j;
HYPRE_Real *As_FF_diag_data;
HYPRE_Real *As_FF_offd_data;
HYPRE_Real *As_FC_diag_data;
HYPRE_Real *As_FC_offd_data;
HYPRE_Real *W_diag_data;
HYPRE_Real *W_offd_data;
HYPRE_Real *buf_data = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int num_cols_A_FF_offd;
HYPRE_Int new_ncols_P_offd;
HYPRE_Int num_cols_P_offd;
HYPRE_Int *P_marker = NULL;
/* Loop variables */
HYPRE_Int index;
HYPRE_Int i, j;
HYPRE_Int *cpt_array;
HYPRE_Int *new_fpt_array;
HYPRE_Int *start_array;
HYPRE_Int *new_fine_to_fine;
HYPRE_Int start, stop, startf, stopf, startnewf, stopnewf;
HYPRE_Int cnt_diag, cnt_offd, row, c_pt, fpt;
HYPRE_Int startc, num_sends;
/* Definitions */
//HYPRE_Real wall_time;
HYPRE_Int n_Cpts, n_Fpts, n_old_Cpts, n_new_Fpts;
HYPRE_Int num_threads = hypre_NumThreads();
//if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
if (my_id == (num_procs -1)) total_old_global_cpts = num_old_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
n_Cpts = num_cpts_global[1]-num_cpts_global[0];
n_old_Cpts = num_old_cpts_global[1]-num_old_cpts_global[0];
#else
total_global_cpts = num_cpts_global[num_procs];
total_old_global_cpts = num_old_cpts_global[num_procs];
n_Cpts = num_cpts_global[my_id+1]-num_cpts_global[my_id];
n_old_Cpts = num_old_cpts_global[my_id+1]-num_old_cpts_global[my_id];
#endif
hypre_ParCSRMatrixGenerateFFFC3(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
n_new_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
n_Fpts = hypre_CSRMatrixNumRows(As_FC_diag);
n_new_Fpts = n_old_Cpts - n_Cpts;
num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);
D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
new_fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_new_Fpts, HYPRE_MEMORY_HOST);
D_w = hypre_CTAlloc(HYPRE_Real, n_new_Fpts, memory_location_P);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
new_fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,startnewf,stopnewf,row,fpt)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
HYPRE_Real beta, gamma;
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
start_array[my_thread_num+1] = stop;
row = 0;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num]++;
}
else if (CF_marker[i] == -2)
{
new_fpt_array[my_thread_num]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i] += cpt_array[i-1];
new_fpt_array[i] += new_fpt_array[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num > 0)
{
startf = start - cpt_array[my_thread_num-1];
}
else
{
startf = 0;
}
if (my_thread_num < num_threads-1)
{
stopf = stop - cpt_array[my_thread_num];
}
else
{
stopf = n_Fpts;
}
/* Create D_q = D_beta */
for (i=startf; i < stopf; i++)
{
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
{
D_q[i] += As_FC_diag_data[j];
}
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
{
D_q[i] += As_FC_offd_data[j];
}
}
row = 0;
if (my_thread_num) row = new_fpt_array[my_thread_num-1];
fpt = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] == -2)
{
new_fine_to_fine[row++] = fpt++;
}
else if (CF_marker[i] < 0)
{
fpt++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (num_cols_A_FF_offd)
{
D_q_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, memory_location_P);
}
index = 0;
comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(As_FF);
comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
buf_data[index++] = D_q[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_q_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Create D_w = D_alpha + D_gamma */
row = 0;
if (my_thread_num) row = new_fpt_array[my_thread_num-1];
for (i=start; i < stop; i++)
{
if (CF_marker[i] == -2)
{
for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
D_w[row] += A_diag_data[j];
}
for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
D_w[row] += A_offd_data[j];
}
for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
{
if (D_q[As_FF_diag_j[j]]) D_w[row] -= As_FF_diag_data[j];
}
for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
{
if (D_q_offd[As_FF_offd_j[j]]) D_w[row] -= As_FF_offd_data[j];
}
D_w[row] -= D_q[new_fine_to_fine[row]];
row++;
}
}
startnewf = 0;
if (my_thread_num) startnewf = new_fpt_array[my_thread_num-1];
stopnewf = new_fpt_array[my_thread_num];
for (i=startnewf; i<stopnewf; i++)
{
j = As_FF_diag_i[i];
if (D_w[i])
{
beta = 1.0/D_w[i];
As_FF_diag_data[j] = beta*D_q[new_fine_to_fine[i]];
for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
As_FF_diag_data[j] *= beta;
for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
As_FF_offd_data[j] *= beta;
}
}
for (i=startf; i<stopf; i++)
{
if (D_q[i]) gamma = -1.0/D_q[i];
else gamma = 0.0;
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
As_FC_diag_data[j] *= gamma;
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
As_FC_offd_data[j] *= gamma;
}
} /* end parallel region */
W = hypre_ParMatmul(As_FF, As_FC);
W_diag = hypre_ParCSRMatrixDiag(W);
W_offd = hypre_ParCSRMatrixOffd(W);
W_diag_i = hypre_CSRMatrixI(W_diag);
W_diag_j = hypre_CSRMatrixJ(W_diag);
W_diag_data = hypre_CSRMatrixData(W_diag);
W_offd_i = hypre_CSRMatrixI(W_offd);
W_offd_j = hypre_CSRMatrixJ(W_offd);
W_offd_data = hypre_CSRMatrixData(W_offd);
num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
   /*-----------------------------------------------------------------------
    *  Initialize data for P
    *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_old_Cpts+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_old_Cpts+1, memory_location_P);
P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_new_Fpts];
P_offd_size = hypre_CSRMatrixI(W_offd)[n_new_Fpts];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startnewf,stopnewf,c_pt,row,cnt_diag,cnt_offd)
#endif
{
HYPRE_Int rowp;
HYPRE_Int my_thread_num = hypre_GetThreadNum();
start = start_array[my_thread_num];
stop = start_array[my_thread_num+1];
if (my_thread_num > 0)
c_pt = cpt_array[my_thread_num-1];
else
c_pt = 0;
row = 0;
if (my_thread_num) row = new_fpt_array[my_thread_num-1];
rowp = row;
if (my_thread_num > 0) rowp = row+cpt_array[my_thread_num-1];
cnt_diag = W_diag_i[row]+c_pt;
cnt_offd = W_offd_i[row];
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
rowp++;
P_diag_j[cnt_diag] = c_pt++;
P_diag_data[cnt_diag++] = 1.0;
P_diag_i[rowp] = cnt_diag;
P_offd_i[rowp] = cnt_offd;
}
else if (CF_marker[i] == -2)
{
rowp++;
for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
{
P_diag_j[cnt_diag] = W_diag_j[j];
P_diag_data[cnt_diag++] = W_diag_data[j];
}
for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
{
P_offd_j[cnt_offd] = W_offd_j[j];
P_offd_data[cnt_offd++] = W_offd_data[j];
}
row++;
P_diag_i[rowp] = cnt_diag;
P_offd_i[rowp] = cnt_offd;
}
}
} /* end parallel region */
/*-----------------------------------------------------------------------
* Create matrix
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
num_cols_P_offd,
P_diag_i[n_old_Cpts],
P_offd_i[n_old_Cpts]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
hypre_ParCSRMatrixColMapOffd(W) = NULL;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
HYPRE_Int *map;
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_old_Cpts];
P_offd_size = P_offd_i[n_old_Cpts];
col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
if (num_cols_P_offd)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < P_offd_size; i++)
{
P_marker[P_offd_j[i]] = 1;
}
new_ncols_P_offd = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i]) new_ncols_P_offd++;
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i])
{
new_col_map_offd[index] = col_map_offd_P[i];
map[index++] = i;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
new_ncols_P_offd);
}
hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
hypre_TFree(map, HYPRE_MEMORY_HOST);
}
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(D_q, memory_location_P);
hypre_TFree(D_q_offd, memory_location_P);
hypre_TFree(D_w, memory_location_P);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(start_array, HYPRE_MEMORY_HOST);
hypre_TFree(new_fine_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(buf_data, memory_location_P);
hypre_ParCSRMatrixDestroy(As_FF);
hypre_ParCSRMatrixDestroy(As_FC);
hypre_ParCSRMatrixDestroy(W);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildModPartialNewExtPIInterp
 *
 * Builds a "partial" extended+i interpolation matrix P mapping the new
 * coarse grid (C-points, CF_marker > 0) to the old coarse grid.  Rows of P
 * correspond to old C-points: C-point rows are identity rows, and rows for
 * the "new" F-points (marked CF_marker == -2) are taken from
 * W = As_FF * As_FC, where As_FF/As_FC are the strength-filtered F-to-F and
 * F-to-C couplings of A produced by hypre_ParCSRMatrixGenerateFFFCD3()
 * (which also returns the row-sum vector D_lambda).
 *
 * Parameters mirror the sibling interpolation builders in this file:
 *   A                   - fine-grid operator
 *   CF_marker           - C/F splitting (>0 C-pt, -2 "new" F-pt, <0 F-pt)
 *   S                   - strength matrix
 *   num_cpts_global     - global partitioning of new C-points
 *   num_old_cpts_global - global partitioning of old C-points (rows of P)
 *   trunc_factor/max_elmts - truncation controls for P
 *   col_offd_S_to_A     - unused here; kept for interface compatibility
 *   P_ptr               - output interpolation matrix
 *
 * Returns hypre_error_flag.
 *
 * NOTE(review): the body is organized as two OpenMP parallel regions with
 * explicit barriers separating phases; the phase ordering (count -> prefix
 * sum -> per-thread ranges -> compute) must be preserved exactly.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModPartialNewExtPIInterp(hypre_ParCSRMatrix *A,
                                             HYPRE_Int *CF_marker,
                                             hypre_ParCSRMatrix *S,
                                             HYPRE_BigInt *num_cpts_global,
                                             HYPRE_BigInt *num_old_cpts_global,
                                             HYPRE_Int debug_flag,
                                             HYPRE_Real trunc_factor,
                                             HYPRE_Int max_elmts,
                                             HYPRE_Int *col_offd_S_to_A,
                                             hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   hypre_ParCSRCommHandle *comm_handle = NULL;
   hypre_ParCSRCommPkg *comm_pkg = NULL;
   HYPRE_Int my_id, num_procs;
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt total_global_cpts;
   HYPRE_BigInt total_old_global_cpts;
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;
   /* Intermediate matrices */
   hypre_ParCSRMatrix *As_FF, *As_FC, *W;
   /* D_q: F-to-C row sums; D_lambda: row sums from FFFCD3; D_inv: 1/(D_q+D_lambda);
      D_tau: extended+i correction term; D_w: final diagonal scaling */
   HYPRE_Real *D_q, *D_w, *D_lambda, *D_inv, *D_tau;
   HYPRE_Real *D_lambda_offd = NULL, *D_inv_offd = NULL;
   hypre_CSRMatrix *As_FF_diag;
   hypre_CSRMatrix *As_FF_offd;
   hypre_CSRMatrix *As_FC_diag;
   hypre_CSRMatrix *As_FC_offd;
   hypre_CSRMatrix *W_diag;
   hypre_CSRMatrix *W_offd;
   HYPRE_Int *As_FF_diag_i;
   HYPRE_Int *As_FF_diag_j;
   HYPRE_Int *As_FF_offd_i;
   HYPRE_Int *As_FF_offd_j;
   HYPRE_Int *As_FC_diag_i;
   HYPRE_Int *As_FC_offd_i;
   HYPRE_Int *W_diag_i;
   HYPRE_Int *W_offd_i;
   HYPRE_Int *W_diag_j;
   HYPRE_Int *W_offd_j;
   HYPRE_Real *As_FF_diag_data;
   HYPRE_Real *As_FF_offd_data;
   HYPRE_Real *As_FC_diag_data;
   HYPRE_Real *As_FC_offd_data;
   HYPRE_Real *W_diag_data;
   HYPRE_Real *W_offd_data;
   HYPRE_Real *buf_data = NULL;
   HYPRE_BigInt *col_map_offd_P = NULL;
   HYPRE_BigInt *new_col_map_offd = NULL;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int num_cols_A_FF_offd;
   HYPRE_Int new_ncols_P_offd;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int *P_marker = NULL;
   /* Loop variables */
   HYPRE_Int index;
   HYPRE_Int i, j;
   /* Per-thread counters/offsets used to partition rows among OpenMP threads */
   HYPRE_Int *cpt_array;
   HYPRE_Int *new_fpt_array;
   HYPRE_Int *start_array;
   HYPRE_Int *new_fine_to_fine;
   HYPRE_Int start, stop, startf, stopf, startnewf, stopnewf;
   HYPRE_Int cnt_diag, cnt_offd, row, c_pt, fpt;
   HYPRE_Int startc, num_sends;
   /* Definitions */
   //HYPRE_Real wall_time;
   HYPRE_Int n_Cpts, n_Fpts, n_old_Cpts, n_new_Fpts;
   HYPRE_Int num_threads = hypre_NumThreads();
   //if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   /* Determine global/local counts of new and old C-points; the last rank
      owns the totals, which are then broadcast (no-global-partition case). */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   if (my_id == (num_procs -1)) total_old_global_cpts = num_old_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   n_Cpts = num_cpts_global[1]-num_cpts_global[0];
   n_old_Cpts = num_old_cpts_global[1]-num_old_cpts_global[0];
#else
   total_global_cpts = num_cpts_global[num_procs];
   total_old_global_cpts = num_old_cpts_global[num_procs];
   n_Cpts = num_cpts_global[my_id+1]-num_cpts_global[my_id];
   n_old_Cpts = num_old_cpts_global[my_id+1]-num_old_cpts_global[my_id];
#endif
   /* Split A into strength-filtered F-to-C and F-to-F parts; D_lambda is
      also produced here (allocated by the callee, freed below). */
   hypre_ParCSRMatrixGenerateFFFCD3(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF, &D_lambda);
   As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
   As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
   As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
   As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
   As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
   As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
   As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
   As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
   As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
   As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
   As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
   As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
   As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
   As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
   n_new_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
   n_Fpts = hypre_CSRMatrixNumRows(As_FC_diag);
   /* NOTE(review): n_new_Fpts from NumRows above is immediately overwritten;
      the two values should agree -- confirm against GenerateFFFCD3. */
   n_new_Fpts = n_old_Cpts - n_Cpts;
   num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);
   D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
   D_inv = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
   new_fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_new_Fpts, HYPRE_MEMORY_HOST);
   D_w = hypre_CTAlloc(HYPRE_Real, n_new_Fpts, memory_location_P);
   D_tau = hypre_CTAlloc(HYPRE_Real, n_new_Fpts, memory_location_P);
   cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   new_fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,startnewf,stopnewf,row,fpt)
#endif
   {
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      HYPRE_Real beta;
      /* Static partition of the fine rows among threads */
      start = (n_fine/num_threads)*my_thread_num;
      if (my_thread_num == num_threads-1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine/num_threads)*(my_thread_num+1);
      }
      start_array[my_thread_num+1] = stop;
      row = 0;
      /* Phase 1: per-thread counts of C-points and new F-points */
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            cpt_array[my_thread_num]++;
         }
         else if (CF_marker[i] == -2)
         {
            new_fpt_array[my_thread_num]++;
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Phase 2: thread 0 turns the counts into inclusive prefix sums */
      if (my_thread_num == 0)
      {
         for (i=1; i < num_threads; i++)
         {
            cpt_array[i] += cpt_array[i-1];
            new_fpt_array[i] += new_fpt_array[i-1];
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Per-thread F-point index range [startf, stopf) */
      if (my_thread_num > 0)
      {
         startf = start - cpt_array[my_thread_num-1];
      }
      else
      {
         startf = 0;
      }
      if (my_thread_num < num_threads-1)
      {
         stopf = stop - cpt_array[my_thread_num];
      }
      else
      {
         stopf = n_Fpts;
      }
      /* Create D_q = D_beta, D_inv = 1/(D_q+D_lambda) */
      for (i=startf; i < stopf; i++)
      {
         for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
         {
            D_q[i] += As_FC_diag_data[j];
         }
         for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
         {
            D_q[i] += As_FC_offd_data[j];
         }
         /* guard against division by zero; D_inv stays 0 in that case */
         if (D_q[i]+D_lambda[i]) D_inv[i] = 1.0/(D_q[i]+D_lambda[i]);
      }
      /* Map each new F-point (CF_marker == -2) to its F-point index */
      row = 0;
      if (my_thread_num) row = new_fpt_array[my_thread_num-1];
      fpt = startf;
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] == -2)
         {
            new_fine_to_fine[row++] = fpt++;
         }
         else if (CF_marker[i] < 0)
         {
            fpt++;
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Thread 0 only: exchange D_lambda and D_inv for off-processor
         columns of As_FF (others wait at the barrier below) */
      if (my_thread_num == 0)
      {
         if (num_cols_A_FF_offd)
         {
            D_lambda_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, memory_location_P);
            D_inv_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, memory_location_P);
         }
         index = 0;
         comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         if (!comm_pkg)
         {
            hypre_MatvecCommPkgCreate(As_FF);
            comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         }
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), memory_location_P);
         for (i = 0; i < num_sends; i++)
         {
            startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            {
               buf_data[index++] = D_lambda[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
         }
         comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_lambda_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         index = 0;
         for (i = 0; i < num_sends; i++)
         {
            startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            {
               buf_data[index++] = D_inv[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
         }
         comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_inv_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Create D_tau */
      startnewf = 0;
      if (my_thread_num) startnewf = new_fpt_array[my_thread_num-1];
      stopnewf = new_fpt_array[my_thread_num];
      for (i=startnewf; i<stopnewf; i++)
      {
         /* skip the stored diagonal entry (first position in each row) */
         for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
         {
            index = As_FF_diag_j[j];
            D_tau[i] += As_FF_diag_data[j]*D_lambda[index]*D_inv[index];
         }
         for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
         {
            index = As_FF_offd_j[j];
            D_tau[i] += As_FF_offd_data[j]*D_lambda_offd[index]*D_inv_offd[index];
         }
      }
      /* Create D_w = D_alpha + D_gamma + D_tau */
      row = 0;
      if (my_thread_num) row = new_fpt_array[my_thread_num-1];
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] == -2)
         {
            /* start from the full row sum of A ... */
            for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
            {
               D_w[row] += A_diag_data[j];
            }
            for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
            {
               D_w[row] += A_offd_data[j];
            }
            /* ... subtract F-F couplings whose column has nonzero D_inv ... */
            for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
            {
               if (D_inv[As_FF_diag_j[j]]) D_w[row] -= As_FF_diag_data[j];
            }
            for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
            {
               if (D_inv_offd[As_FF_offd_j[j]]) D_w[row] -= As_FF_offd_data[j];
            }
            /* ... and add the tau correction minus the F-to-C sum */
            D_w[row] += D_tau[row] - D_q[new_fine_to_fine[row]];
            row++;
         }
      }
      /* Scale As_FF rows by -1/D_w; diagonal slot gets (D_q+D_lambda)/(-D_w) */
      startnewf = 0;
      if (my_thread_num) startnewf = new_fpt_array[my_thread_num-1];
      stopnewf = new_fpt_array[my_thread_num];
      for (i=startnewf; i<stopnewf; i++)
      {
         j = As_FF_diag_i[i];
         if (D_w[i])
         {
            beta = -1.0/D_w[i];
            As_FF_diag_data[j] = beta*(D_q[new_fine_to_fine[i]]+D_lambda[new_fine_to_fine[i]]);
            for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
               As_FF_diag_data[j] *= beta;
            for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
               As_FF_offd_data[j] *= beta;
         }
      }
      /* Scale As_FC rows by D_inv */
      for (i=startf; i<stopf; i++)
      {
         beta = D_inv[i];
         for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
            As_FC_diag_data[j] *= beta;
         for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
            As_FC_offd_data[j] *= beta;
      }
   } /* end parallel region */
   /* W holds the interpolation weights for the new F-point rows of P */
   W = hypre_ParMatmul(As_FF, As_FC);
   W_diag = hypre_ParCSRMatrixDiag(W);
   W_offd = hypre_ParCSRMatrixOffd(W);
   W_diag_i = hypre_CSRMatrixI(W_diag);
   W_diag_j = hypre_CSRMatrixJ(W_diag);
   W_diag_data = hypre_CSRMatrixData(W_diag);
   W_offd_i = hypre_CSRMatrixI(W_offd);
   W_offd_j = hypre_CSRMatrixJ(W_offd);
   W_offd_data = hypre_CSRMatrixData(W_offd);
   num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
   /*-----------------------------------------------------------------------
    *  Initialize data for P
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_old_Cpts+1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_old_Cpts+1, memory_location_P);
   /* one entry per C-point (identity) plus all W entries */
   P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_new_Fpts];
   P_offd_size = hypre_CSRMatrixI(W_offd)[n_new_Fpts];
   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }
   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startnewf,stopnewf,c_pt,row,cnt_diag,cnt_offd)
#endif
   {
      HYPRE_Int rowp;
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      start = start_array[my_thread_num];
      stop = start_array[my_thread_num+1];
      /* per-thread offsets into P derived from the phase-1 prefix sums */
      if (my_thread_num > 0)
         c_pt = cpt_array[my_thread_num-1];
      else
         c_pt = 0;
      row = 0;
      if (my_thread_num) row = new_fpt_array[my_thread_num-1];
      rowp = row;
      if (my_thread_num > 0) rowp = row+cpt_array[my_thread_num-1];
      cnt_diag = W_diag_i[row]+c_pt;
      cnt_offd = W_offd_i[row];
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            /* C-point: identity row in P */
            rowp++;
            P_diag_j[cnt_diag] = c_pt++;
            P_diag_data[cnt_diag++] = 1.0;
            P_diag_i[rowp] = cnt_diag;
            P_offd_i[rowp] = cnt_offd;
         }
         else if (CF_marker[i] == -2)
         {
            /* new F-point: copy the corresponding row of W */
            rowp++;
            for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
            {
               P_diag_j[cnt_diag] = W_diag_j[j];
               P_diag_data[cnt_diag++] = W_diag_data[j];
            }
            for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
            {
               P_offd_j[cnt_offd] = W_offd_j[j];
               P_offd_data[cnt_offd++] = W_offd_data[j];
            }
            row++;
            P_diag_i[rowp] = cnt_diag;
            P_offd_i[rowp] = cnt_offd;
         }
      }
   } /* end parallel region */
   /*-----------------------------------------------------------------------
    *  Create matrix
    *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm,
                                total_old_global_cpts,
                                total_global_cpts,
                                num_old_cpts_global,
                                num_cpts_global,
                                num_cols_P_offd,
                                P_diag_i[n_old_Cpts],
                                P_offd_i[n_old_Cpts]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* P steals W's off-diagonal column map; W must not free it */
   hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
   hypre_ParCSRMatrixColMapOffd(W) = NULL;
   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      HYPRE_Int *map;
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation may reallocate the CSR arrays: re-fetch everything */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_old_Cpts];
      P_offd_size = P_offd_i[n_old_Cpts];
      col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
      if (num_cols_P_offd)
      {
         /* compress col_map_offd_P to only the columns still referenced */
         P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
         for (i=0; i < P_offd_size; i++)
         {
            P_marker[P_offd_j[i]] = 1;
         }
         new_ncols_P_offd = 0;
         for (i=0; i < num_cols_P_offd; i++)
            if (P_marker[i]) new_ncols_P_offd++;
         new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         index = 0;
         for (i=0; i < num_cols_P_offd; i++)
            if (P_marker[i])
            {
               new_col_map_offd[index] = col_map_offd_P[i];
               map[index++] = i;
            }
         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < P_offd_size; i++)
         {
            P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
                                             new_ncols_P_offd);
         }
         hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
         hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
         hypre_TFree(map, HYPRE_MEMORY_HOST);
      }
   }
   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;
   /* Deallocate memory */
   hypre_TFree(D_q, memory_location_P);
   hypre_TFree(D_inv, memory_location_P);
   hypre_TFree(D_inv_offd, memory_location_P);
   hypre_TFree(D_lambda, memory_location_P);
   hypre_TFree(D_lambda_offd, memory_location_P);
   hypre_TFree(D_tau, memory_location_P);
   hypre_TFree(D_w, memory_location_P);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(new_fine_to_fine, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, memory_location_P);
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);
   return hypre_error_flag;
}
|
GB_unaryop__lnot_fp32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int32
// op(A') function: GB_tran__lnot_fp32_int32
// C type: float
// A type: int32_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
// input (A) entry type
#define GB_ATYPE \
    int32_t

// output (C) entry type
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// access the C entry at position p
#define GB_CX(p) Cx [p]

// unary operator: logical NOT after typecast
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type (int32_t) to the C type (float)
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij)): load, typecast, then apply the operator
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply z = !(x != 0) entrywise, casting int32_t -> float.
// Cx and Ax may be aliased; the loop is a pure elementwise map, so aliasing
// at identical indices is safe.
GrB_Info GB_unop__lnot_fp32_int32
(
    float *Cx,              // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // expanded form of GB_CAST_OP (p, p):
        // load, typecast to float, then apply logical NOT
        int32_t aij = Ax [p] ;
        float z = (float) aij ;
        Cx [p] = !(z != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int32_t -> float, and apply
// z = !(x != 0).  The actual kernel is generated by including
// GB_unaryop_transpose.c, which expands the GB_* macros defined above.
GrB_Info GB_tran__lnot_fp32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the two-phase transpose (phase 1 counts, phase 2 fills)
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N RRR L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% John Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"

  CacheView
    *combine_view;

  const Image
    *next;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the image are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->columns != image->columns) || (next->rows != image->rows))
      ThrowImageException(OptionError,"ImagesAreNotTheSameSize");
  }
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&combine_image->exception);
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(combine_image,sRGBColorspace);
  if ((channel & OpacityChannel) != 0)
    combine_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(combine_image);
  /*
    Combine images: for each row, copy the grayscale intensity of each
    source image into the next requested channel of the combined image.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    PixelPacket
      *pixels;

    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    if (((channel & RedChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            /*
              Bug fix: the cache view was previously leaked here and the
              pixel-read failure was silently ignored; destroy the view and
              record the failure so the caller gets a NULL result.
            */
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & GreenChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & BlueChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) && (next != (Image *) NULL))
      {
        IndexPacket
          *indexes;

        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(combine_view);
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CombineImageTag,progress++,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (IsGrayColorspace(combine_image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(combine_image,sRGBColorspace);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  /*
    Report whether the alpha (matte) channel is currently enabled; the
    image pixels themselves are not inspected.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  return(image->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImageChannel() separates a channel from the image and returns it as
% a grayscale image. A channel is a particular color component of each pixel
% in the image.
%
% The format of the SeparateImageChannel method is:
%
% MagickBooleanType SeparateImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
*/
MagickExport MagickBooleanType SeparateImageChannel(Image *image,
  const ChannelType channel)
{
#define SeparateImageTag "Separate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    In-place channel extraction needs writable, per-pixel storage.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    GrayChannels copies intensity into alpha, so transparency must be on.
  */
  if (channel == GrayChannels)
    image->matte=MagickTrue;
  /*
    Separate image channels: replicate the selected channel into the
    color components, producing a grayscale rendition in place.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* Another iteration may have failed already; skip work if so. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    switch (channel)
    {
      case RedChannel:
      {
        /* Replicate red into green and blue. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
        break;
      }
      case GreenChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelGreen(q));
          SetPixelBlue(q,GetPixelGreen(q));
          q++;
        }
        break;
      }
      case BlueChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelBlue(q));
          SetPixelGreen(q,GetPixelBlue(q));
          q++;
        }
        break;
      }
      case OpacityChannel:
      {
        /* Render the opacity channel into all three color components. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelOpacity(q));
          SetPixelGreen(q,GetPixelOpacity(q));
          SetPixelBlue(q,GetPixelOpacity(q));
          q++;
        }
        break;
      }
      case BlackChannel:
      {
        /*
          The black (K) channel lives in the index queue; it only exists
          for PseudoClass or CMYK images, so bail out otherwise.
        */
        if ((image->storage_class != PseudoClass) &&
            (image->colorspace != CMYKColorspace))
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIndex(indexes+x));
          SetPixelGreen(q,GetPixelIndex(indexes+x));
          SetPixelBlue(q,GetPixelIndex(indexes+x));
          q++;
        }
        break;
      }
      case TrueAlphaChannel:
      {
        /* Alpha (inverse of opacity) rendered as grayscale. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelAlpha(q));
          SetPixelGreen(q,GetPixelAlpha(q));
          SetPixelBlue(q,GetPixelAlpha(q));
          q++;
        }
        break;
      }
      case GrayChannels:
      {
        /* Copy pixel intensity into the alpha channel (colors kept). */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q)));
          q++;
        }
        break;
      }
      default:
        break;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across OpenMP threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SeparateImageChannel)
#endif
        proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* The result is grayscale (except GrayChannels, which keeps colors
     and transparency). */
  if (channel != GrayChannels)
    image->matte=MagickFalse;
  (void) SetImageColorspace(image,GRAYColorspace);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% MagickBooleanType SeparateImages(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channels to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  /*
    Bug fix: each CloneImage() result was previously passed to
    SeparateImageChannel() unchecked; on a clone failure (e.g. resource
    exhaustion) that dereferenced/asserted on NULL.  Guard every clone and
    skip the channel when cloning fails; the error is already recorded in
    'exception' by CloneImage().
  */
  if ((channel & RedChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,RedChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & GreenChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,GreenChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & BlueChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlueChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlackChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & AlphaChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,TrueAlphaChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelType alpha_type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% CopyAlphaChannel, DeactivateAlphaChannel, ExtractAlphaChannel,
% OpaqueAlphaChannel, ResetAlphaChannel, SetAlphaChannel,
% ShapeAlphaChannel, and TransparentAlphaChannel.
%
*/
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelType alpha_type)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /* Simply flag the alpha channel as present; pixels untouched. */
      image->matte=MagickTrue;
      break;
    }
    case BackgroundAlphaChannel:
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      IndexPacket
        index;

      /* NOTE(review): this inner 'status' shadows the function-scope one;
         the case returns directly below, so the outer value is never
         consulted -- intentional but fragile. */
      MagickBooleanType
        status;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      ssize_t
        y;

      /*
        Set transparent pixels to background color.
      */
      if (image->matte == MagickFalse)
        break;
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Replace fully transparent pixels with the background color. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (q->opacity == TransparentOpacity)
            {
              SetPixelRed(q,pixel.red);
              SetPixelGreen(q,pixel.green);
              SetPixelBlue(q,pixel.blue);
            }
          q++;
        }
        /* CMYK: also reset the black channel in the index queue. */
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    case ShapeAlphaChannel:
    {
      /*
        Special usage case for SeparateImageChannel(): copy grayscale color to
        the alpha channel.
      */
      status=SeparateImageChannel(image,GrayChannels);
      image->matte=MagickTrue; /* make sure transparency is now on! */
      if (alpha_type == ShapeAlphaChannel)
        {
          MagickPixelPacket
            background;

          /*
            Reset all color channels to background color.
          */
          GetMagickPixelPacket(image,&background);
          SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *)
            NULL,&background);
          (void) LevelColorsImage(image,&background,&background,MagickTrue);
        }
      break;
    }
    case DeactivateAlphaChannel:
    {
      /* Hide the alpha channel; pixel data is preserved. */
      image->matte=MagickFalse;
      break;
    }
    case ExtractAlphaChannel:
    {
      /* Turn the alpha channel into a grayscale image, then drop matte. */
      status=SeparateImageChannel(image,TrueAlphaChannel);
      image->matte=MagickFalse;
      break;
    }
    case RemoveAlphaChannel:
    case FlattenAlphaChannel:
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      IndexPacket
        index;

      /* NOTE(review): shadows the outer 'status' (see above); this case
         also returns directly. */
      MagickBooleanType
        status;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      ssize_t
        y;

      /*
        Flatten image pixels over the background pixels.
      */
      if (image->matte == MagickFalse)
        break;
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Alpha-composite each pixel over the background color. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            opacity;

          gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity;
          opacity=(double) QuantumRange*(1.0-gamma);
          /* PerceptibleReciprocal avoids dividing by a tiny gamma. */
          gamma=PerceptibleReciprocal(gamma);
          q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red,
            (MagickRealType) q->opacity,(MagickRealType) pixel.red,
            (MagickRealType) pixel.opacity));
          q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green,
            (MagickRealType) q->opacity,(MagickRealType) pixel.green,
            (MagickRealType) pixel.opacity));
          q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue,
            (MagickRealType) q->opacity,(MagickRealType) pixel.blue,
            (MagickRealType) pixel.opacity));
          q->opacity=ClampToQuantum(opacity);
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case ResetAlphaChannel: /* deprecated */
    case OpaqueAlphaChannel:
    {
      status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case SetAlphaChannel:
    {
      /* Only initialize opacity when the matte channel is not yet active. */
      if (image->matte == MagickFalse)
        status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageOpacity(image,TransparentOpacity);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  /* Push the pixel changes through to the pixel cache. */
  return(SyncImagePixelCache(image,&image->exception));
}
|
NewTimer.h | //////////////////////////////////////////////////////////////////
// (c) Copyright 2008- by Ken Esler
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
// Ken Esler
// National Center for Supercomputing Applications &
// University of Illinois, Urbana-Champaign
// Urbana, IL 61801
// e-mail: jnkim@ncsa.uiuc.edu
//
// Supported by
// National Center for Supercomputing Applications, UIUC
// Materials Computation Center, UIUC
//////////////////////////////////////////////////////////////////
// -*- C++ -*-
/** @file NewTimer.h
* @brief NewTimer class various high-resolution timers.
*/
#ifndef QMCPLUSPLUS_NEW_TIMER_H
#define QMCPLUSPLUS_NEW_TIMER_H
#include <Utilities/Clock.h>
#include <vector>
#include <string>
#include <algorithm>
class Communicate;
namespace qmcplusplus
{
/* Timer using omp_get_wtime */
/** Accumulating wall-clock timer.
 *
 *  Usage: call start() then stop(); each pair adds the elapsed time to
 *  the running total and bumps the call count.  When DISABLE_TIMER is
 *  defined, start()/stop() compile to no-ops.
 */
class NewTimer
{
protected:
  double start_time;   // clock stamp taken by the most recent start()
  double total_time;   // accumulated elapsed time over all start/stop pairs
  long num_calls;      // number of completed stop() calls
  std::string name;    // label used when reporting
public:
#if defined(DISABLE_TIMER)
  inline void start() {}
  inline void stop() {}
#else
  /// Record the current clock; must precede the matching stop().
  inline void start()
  {
    start_time = cpu_clock();
  }
  /// Accumulate the time elapsed since the matching start().
  inline void stop()
  {
    total_time += cpu_clock() - start_time;
    num_calls++;
  }
#endif
  inline double get_total() const
  {
    return total_time;
  }
  inline long get_num_calls() const
  {
    return num_calls;
  }
  inline std::string get_name() const
  {
    return name;
  }
  /// Clear accumulated statistics.
  inline void reset()
  {
    num_calls = 0;
    total_time=0.0;
  }
  // Bug fix: start_time was left uninitialized, so a stop() issued
  // without a prior start() read an indeterminate value (UB).  It is now
  // zero-initialized along with the other members.
  NewTimer(const std::string& myname) :
    start_time(0.0), total_time(0.0), num_calls(0), name(myname)
  { }
  void set_name(const std::string& myname)
  {
    name=myname;
  }
};
/** Strict-weak ordering of timers by name, for sorting report output.
 *
 *  Fix: operator() is now const-qualified so the comparator can be used
 *  by const-correct algorithms and as an ordered-container comparator.
 */
struct TimerComparator
{
  inline bool operator()(const NewTimer *a, const NewTimer *b) const
  {
    return a->get_name() < b->get_name();
  }
};
/** Registry of all NewTimer instances; reset() and print() are defined
 *  in the corresponding .cpp (declarations only here).
 */
class TimerManagerClass
{
protected:
  // Registered timers; pointers are owned by their creators, not by
  // this manager.
  std::vector<NewTimer*> TimerList;
public:
  // Register a timer.  The critical section guards concurrent
  // registration from parallel OpenMP regions.
  inline void addTimer (NewTimer* t)
  {
#pragma omp critical
    {
      TimerList.push_back(t);
    }
  }
  void reset();
  void print (Communicate* comm);
};
extern TimerManagerClass TimerManager;
}
#endif
|
ast-dump-openmp-target.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp target
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target.c:4:1) *const restrict'
|
reduction_minus_2.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main()
{
  /* Seed value for the reduction. */
  int result = 100;
  /* Per the OpenMP spec, the '-' reduction behaves exactly like '+':
     each thread's private copy starts at the identity 0 and the partial
     results are combined by addition into the original variable. */
  #pragma omp parallel reduction(-:result)
  {
    result -= omp_get_thread_num();
  }
  /* Prints 100 - (0 + 1 + ... + nthreads-1); depends on thread count. */
  printf("Result: %d\n", result);
}
|
runner_openmm.c | /*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team,
* check out http://www.gromacs.org for more information.
* Copyright (c) 2012,2013, by the GROMACS development team, led by
* David van der Spoel, Berk Hess, Erik Lindahl, and including many
* others, as listed in the AUTHORS file in the top-level source
* directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#if defined(HAVE_SCHED_H) && defined(HAVE_SCHED_GETAFFINITY)
#define _GNU_SOURCE
#include <sched.h>
#include <sys/syscall.h>
#endif
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "typedefs.h"
#include "smalloc.h"
#include "sysstuff.h"
#include "statutil.h"
#include "mdrun.h"
#include "md_logging.h"
#include "md_support.h"
#include "network.h"
#include "pull.h"
#include "names.h"
#include "disre.h"
#include "orires.h"
#include "pme.h"
#include "mdatoms.h"
#include "repl_ex.h"
#include "qmmm.h"
#include "mpelogging.h"
#include "domdec.h"
#include "partdec.h"
#include "coulomb.h"
#include "constr.h"
#include "mvdata.h"
#include "checkpoint.h"
#include "mtop_util.h"
#include "sighandler.h"
#include "tpxio.h"
#include "txtdump.h"
#include "gmx_detect_hardware.h"
#include "gmx_omp_nthreads.h"
#include "pull_rotation.h"
#include "calc_verletbuf.h"
#include "../mdlib/nbnxn_search.h"
#include "../mdlib/nbnxn_consts.h"
#include "gmx_fatal_collective.h"
#include "membed.h"
#include "md_openmm.h"
#include "gmx_omp.h"
#include "thread_mpi/threads.h"
#ifdef GMX_LIB_MPI
#include <mpi.h>
#endif
#ifdef GMX_THREAD_MPI
#include "tmpi.h"
#endif
#ifdef GMX_FAHCORE
#include "corewrap.h"
#endif
#include "gpu_utils.h"
#include "nbnxn_cuda_data_mgmt.h"
typedef struct {
gmx_integrator_t *func;
} gmx_intp_t;
/* The array should match the eI array in include/types/enums.h */
const gmx_intp_t integrator[eiNR] = { {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm}, {do_md_openmm},{do_md_openmm}};
gmx_large_int_t deform_init_init_step_tpx;
matrix deform_init_box_tpx;
#ifdef GMX_THREAD_MPI
tMPI_Thread_mutex_t deform_init_box_mutex=TMPI_THREAD_MUTEX_INITIALIZER;
#endif
#ifdef GMX_THREAD_MPI
struct mdrunner_arglist
{
gmx_hw_opt_t *hw_opt;
FILE *fplog;
t_commrec *cr;
int nfile;
const t_filenm *fnm;
output_env_t oenv;
gmx_bool bVerbose;
gmx_bool bCompact;
int nstglobalcomm;
ivec ddxyz;
int dd_node_order;
real rdd;
real rconstr;
const char *dddlb_opt;
real dlb_scale;
const char *ddcsx;
const char *ddcsy;
const char *ddcsz;
const char *nbpu_opt;
int nsteps_cmdline;
int nstepout;
int resetstep;
int nmultisim;
int repl_ex_nst;
int repl_ex_nex;
int repl_ex_seed;
real pforce;
real cpt_period;
real max_hours;
const char *deviceOptions;
unsigned long Flags;
int ret; /* return value */
};
/* The function used for spawning threads. Extracts the mdrunner()
arguments from its one argument and calls mdrunner(), after making
a commrec. */
static void mdrunner_start_fn(void *arg)
{
    /* Thread-MPI entry point: unpack the mdrunner() arguments from the
       shared struct, build per-thread state, run mdrunner(), and store
       its return value back for the spawning code. */
    struct mdrunner_arglist *mda=(struct mdrunner_arglist*)arg;
    struct mdrunner_arglist mc=*mda; /* copy the arg list to make sure
                                        that it's thread-local. This doesn't
                                        copy pointed-to items, of course,
                                        but those are all const. */
    t_commrec *cr;  /* we need a local version of this */
    FILE *fplog=NULL;
    t_filenm *fnm;

    /* Each thread gets its own copy of the file-name list. */
    fnm = dup_tfn(mc.nfile, mc.fnm);

    /* Create this thread's communication record. */
    cr = init_par_threads(mc.cr);

    /* Only the master thread writes to the log file. */
    if (MASTER(cr))
    {
        fplog=mc.fplog;
    }

    mda->ret=mdrunner(mc.hw_opt, fplog, cr, mc.nfile, fnm, mc.oenv,
                      mc.bVerbose, mc.bCompact, mc.nstglobalcomm,
                      mc.ddxyz, mc.dd_node_order, mc.rdd,
                      mc.rconstr, mc.dddlb_opt, mc.dlb_scale,
                      mc.ddcsx, mc.ddcsy, mc.ddcsz,
                      mc.nbpu_opt,
                      mc.nsteps_cmdline, mc.nstepout, mc.resetstep,
                      mc.nmultisim, mc.repl_ex_nst, mc.repl_ex_nex, mc.repl_ex_seed, mc.pforce,
                      mc.cpt_period, mc.max_hours, mc.deviceOptions, mc.Flags);
}
/* called by mdrunner() to start a specific number of threads (including
the main thread) for thread-parallel runs. This in turn calls mdrunner()
for each thread.
All options besides nthreads are the same as for mdrunner(). */
static t_commrec *mdrunner_start_threads(gmx_hw_opt_t *hw_opt,
                                         FILE *fplog,t_commrec *cr,int nfile,
                                         const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
                                         gmx_bool bCompact, int nstglobalcomm,
                                         ivec ddxyz,int dd_node_order,real rdd,real rconstr,
                                         const char *dddlb_opt,real dlb_scale,
                                         const char *ddcsx,const char *ddcsy,const char *ddcsz,
                                         const char *nbpu_opt,
                                         int nsteps_cmdline, int nstepout,int resetstep,
                                         int nmultisim,int repl_ex_nst,int repl_ex_nex, int repl_ex_seed,
                                         real pforce,real cpt_period, real max_hours,
                                         const char *deviceOptions, unsigned long Flags)
{
    /* Spawn nthreads_tmpi-1 additional threads (plus the calling thread)
       that each run mdrunner_start_fn(); returns a commrec reflecting
       the new parallel setup, or the original cr when no threads are
       needed, or NULL on tMPI failure. */
    int ret;
    struct mdrunner_arglist *mda;
    t_commrec *crn; /* the new commrec */
    t_filenm *fnmn;

    /* first check whether we even need to start tMPI */
    if (hw_opt->nthreads_tmpi < 2)
    {
        return cr;
    }

    /* a few small, one-time, almost unavoidable memory leaks: */
    snew(mda,1);
    fnmn=dup_tfn(nfile, fnm);

    /* fill the data structure to pass as void pointer to thread start fn */
    mda->hw_opt=hw_opt;
    mda->fplog=fplog;
    mda->cr=cr;
    mda->nfile=nfile;
    mda->fnm=fnmn;
    mda->oenv=oenv;
    mda->bVerbose=bVerbose;
    mda->bCompact=bCompact;
    mda->nstglobalcomm=nstglobalcomm;
    mda->ddxyz[XX]=ddxyz[XX];
    mda->ddxyz[YY]=ddxyz[YY];
    mda->ddxyz[ZZ]=ddxyz[ZZ];
    mda->dd_node_order=dd_node_order;
    mda->rdd=rdd;
    mda->rconstr=rconstr;
    mda->dddlb_opt=dddlb_opt;
    mda->dlb_scale=dlb_scale;
    mda->ddcsx=ddcsx;
    mda->ddcsy=ddcsy;
    mda->ddcsz=ddcsz;
    mda->nbpu_opt=nbpu_opt;
    mda->nsteps_cmdline=nsteps_cmdline;
    mda->nstepout=nstepout;
    mda->resetstep=resetstep;
    mda->nmultisim=nmultisim;
    mda->repl_ex_nst=repl_ex_nst;
    mda->repl_ex_nex=repl_ex_nex;
    mda->repl_ex_seed=repl_ex_seed;
    mda->pforce=pforce;
    mda->cpt_period=cpt_period;
    mda->max_hours=max_hours;
    mda->deviceOptions=deviceOptions;
    mda->Flags=Flags;

    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns */
    ret=tMPI_Init_fn(TRUE, hw_opt->nthreads_tmpi,
                     (hw_opt->bThreadPinning ? TMPI_AFFINITY_ALL_CORES : TMPI_AFFINITY_NONE),
                     mdrunner_start_fn, (void*)(mda) );
    if (ret!=TMPI_SUCCESS)
        return NULL;

    /* make a new comm_rec to reflect the new situation */
    crn=init_par_threads(cr);
    return crn;
}
/* Decide how to divide nthreads_tot between thread-MPI ranks and OpenMP
 * threads per rank; returns the number of thread-MPI ranks to start.
 */
static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t *hw_opt,
                                        int nthreads_tot,
                                        int ngpu)
{
    int nthreads_tmpi;

    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
    if (ngpu > 0)
    {
        /* One thread-MPI rank per GPU, capped by the total thread budget. */
        nthreads_tmpi = ngpu;
        if (nthreads_tot > 0 && nthreads_tot < nthreads_tmpi)
        {
            nthreads_tmpi = nthreads_tot;
        }
    }
    else if (hw_opt->nthreads_omp > 0)
    {
        /* Here we could oversubscribe, when we do, we issue a warning later */
        nthreads_tmpi = max(1,nthreads_tot/hw_opt->nthreads_omp);
    }
    else
    {
        /* TODO choose nthreads_omp based on hardware topology
           when we have a hardware topology detection library */
        /* In general, when running up to 4 threads, OpenMP should be faster.
         * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
         * On Intel>=Nehalem running OpenMP on a single CPU is always faster,
         * even on two CPUs it's usually faster (but with many OpenMP threads
         * it could be faster not to use HT, currently we always use HT).
         * On Nehalem/Westmere we want to avoid running 16 threads over
         * two CPUs with HT, so we need a limit<16; thus we use 12.
         * A reasonable limit for Intel Sandy and Ivy bridge,
         * not knowing the topology, is 16 threads.
         */
        const int nthreads_omp_always_faster             = 4;
        const int nthreads_omp_always_faster_Nehalem     = 12;
        const int nthreads_omp_always_faster_SandyBridge = 16;
        const int first_model_Nehalem                    = 0x1A;
        const int first_model_SandyBridge                = 0x2A;
        gmx_bool  bIntel_Family6;

        bIntel_Family6 =
            (gmx_cpuid_vendor(hwinfo->cpuid_info) == GMX_CPUID_VENDOR_INTEL &&
             gmx_cpuid_family(hwinfo->cpuid_info) == 6);

        /* Bug fix: the CPU model number was compared against the
         * thread-count thresholds (12/16) instead of the first model
         * numbers (0x1A/0x2A), so pre-Nehalem Family-6 models (12..25)
         * incorrectly selected pure OpenMP.  Compare against the model
         * constants, which were declared but unused.
         */
        if (nthreads_tot <= nthreads_omp_always_faster ||
            (bIntel_Family6 &&
             ((gmx_cpuid_model(hwinfo->cpuid_info) >= first_model_Nehalem && nthreads_tot <= nthreads_omp_always_faster_Nehalem) ||
              (gmx_cpuid_model(hwinfo->cpuid_info) >= first_model_SandyBridge && nthreads_tot <= nthreads_omp_always_faster_SandyBridge))))
        {
            /* Use pure OpenMP parallelization */
            nthreads_tmpi = 1;
        }
        else
        {
            /* Don't use OpenMP parallelization */
            nthreads_tmpi = nthreads_tot;
        }
    }

    return nthreads_tmpi;
}
/* Get the number of threads to use for thread-MPI based on how many
* were requested, which algorithms we're using,
* and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
* Thus all options should be internally consistent and consistent
* with the hardware, except that ntmpi could be larger than #GPU.
*/
static int get_nthreads_mpi(gmx_hw_info_t *hwinfo,
                            gmx_hw_opt_t  *hw_opt,
                            t_inputrec    *inputrec, gmx_mtop_t *mtop,
                            const t_commrec *cr,
                            FILE          *fplog)
{
    /* Unused locals env and sbuf[STRLEN] removed. */
    int      nthreads_hw, nthreads_tot_max, nthreads_tmpi, nthreads_new, ngpu;
    int      min_atoms_per_mpi_thread;
    gmx_bool bCanUseGPU;

    if (hw_opt->nthreads_tmpi > 0)
    {
        /* Trivial, return right away */
        return hw_opt->nthreads_tmpi;
    }

    nthreads_hw = hwinfo->nthreads_hw_avail;

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
    {
        nthreads_tot_max = hw_opt->nthreads_tot;
    }
    else
    {
        nthreads_tot_max = nthreads_hw;
    }

    /* GPUs are only usable with the Verlet cut-off scheme */
    bCanUseGPU = (inputrec->cutoff_scheme == ecutsVERLET && hwinfo->bCanUseGPU);
    if (bCanUseGPU)
    {
        ngpu = hwinfo->gpu_info.ncuda_dev_use;
    }
    else
    {
        ngpu = 0;
    }

    nthreads_tmpi =
        get_tmpi_omp_thread_division(hwinfo, hw_opt, nthreads_tot_max, ngpu);

    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
    {
        /* Steps are divided over the nodes instead of splitting the atoms */
        min_atoms_per_mpi_thread = 0;
    }
    else
    {
        if (bCanUseGPU)
        {
            min_atoms_per_mpi_thread = MIN_ATOMS_PER_GPU;
        }
        else
        {
            min_atoms_per_mpi_thread = MIN_ATOMS_PER_MPI_THREAD;
        }
    }

    /* Check if an algorithm does not support parallel simulation. */
    if (nthreads_tmpi != 1 &&
        ( inputrec->eI == eiLBFGS ||
          inputrec->coulombtype == eelEWALD ) )
    {
        nthreads_tmpi = 1;

        md_print_warn(cr,fplog,"The integration or electrostatics algorithm doesn't support parallel runs. Using a single thread-MPI thread.\n");
        /* Defensive check: we only get here when nthreads_tmpi was chosen
         * automatically (an explicit request returned early above).
         */
        if (hw_opt->nthreads_tmpi > nthreads_tmpi)
        {
            gmx_fatal(FARGS,"You asked for more than 1 thread-MPI thread, but an algorithm doesn't support that");
        }
    }
    else if (mtop->natoms/nthreads_tmpi < min_atoms_per_mpi_thread)
    {
        /* the thread number was chosen automatically, but there are too many
           threads (too few atoms per thread) */
        nthreads_new = max(1, mtop->natoms/min_atoms_per_mpi_thread);

        /* Avoid partial use of Hyper-Threading */
        if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
            nthreads_new > nthreads_hw/2 && nthreads_new < nthreads_hw)
        {
            nthreads_new = nthreads_hw/2;
        }

        /* Avoid large prime numbers in the thread count */
        if (nthreads_new >= 6)
        {
            /* Use only 6,8,10 with additional factors of 2 */
            int fac;

            fac = 2;
            while (3*fac*2 <= nthreads_new)
            {
                fac *= 2;
            }

            nthreads_new = (nthreads_new/fac)*fac;
        }
        else
        {
            /* Avoid 5 */
            if (nthreads_new == 5)
            {
                nthreads_new = 4;
            }
        }

        nthreads_tmpi = nthreads_new;

        fprintf(stderr,"\n");
        fprintf(stderr,"NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr," only starting %d thread-MPI threads.\n",nthreads_tmpi);
        fprintf(stderr," You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");
    }

    return nthreads_tmpi;
}
#endif /* GMX_THREAD_MPI */
/* Environment variable for setting nstlist */
static const char* NSTLIST_ENVVAR = "GMX_NSTLIST";
/* Try to increase nstlist when using a GPU with nstlist less than this */
static const int NSTLIST_GPU_ENOUGH = 20;
/* Increase nstlist until the non-bonded cost increases more than this factor */
static const float NBNXN_GPU_LIST_OK_FAC = 1.25;
/* Don't increase nstlist beyond a non-bonded cost increase of this factor */
static const float NBNXN_GPU_LIST_MAX_FAC = 1.40;
/* Try to increase nstlist when running on a GPU.
 * Scans a short list of candidate nstlist values (or a value forced via the
 * GMX_NSTLIST environment variable) and accepts the largest one whose
 * required pair-list radius still fits in the box (and in the domain
 * decomposition, if active) without increasing the non-bonded cost too much.
 * On success ir->nstlist, ir->rlist and ir->rlistlong are updated in place.
 */
static void increase_nstlist(FILE *fp,t_commrec *cr,
                             t_inputrec *ir,const gmx_mtop_t *mtop,matrix box)
{
    char *env;
    int  nstlist_orig,nstlist_prev;
    verletbuf_list_setup_t ls;
    real rlist_inc,rlist_ok,rlist_max,rlist_new,rlist_prev;
    int  i;
    t_state state_tmp;                     /* scratch state for the DD cut-off check */
    gmx_bool bBox,bDD,bCont;
    const char *nstl_fmt="\nFor optimal performance with a GPU nstlist (now %d) should be larger.\nThe optimum depends on your CPU and GPU resources.\nYou might want to try several nstlist values.\n";
    const char *vbd_err="Can not increase nstlist for GPU run because verlet-buffer-drift is not set or used";
    const char *box_err="Can not increase nstlist for GPU run because the box is too small";
    const char *dd_err ="Can not increase nstlist for GPU run because of domain decomposition limitations";
    char buf[STRLEN];

    /* nstlist candidate values tried in order when GMX_NSTLIST is not set */
    const int nstl[]={ 20, 25, 40, 50 };
#define NNSTL sizeof(nstl)/sizeof(nstl[0])

    env = getenv(NSTLIST_ENVVAR);
    if (env == NULL)
    {
        if (fp != NULL)
        {
            fprintf(fp,nstl_fmt,ir->nstlist);
        }
    }

    /* verletbuf_drift == 0 marks a tpr predating the Verlet buffer setup;
     * exact float comparison is intentional here (0 is a sentinel value). */
    if (ir->verletbuf_drift == 0)
    {
        gmx_fatal(FARGS,"You are using an old tpr file with a GPU, please generate a new tpr file with an up to date version of grompp");
    }

    /* A negative drift means the buffer feature is disabled: bail out */
    if (ir->verletbuf_drift < 0)
    {
        if (MASTER(cr))
        {
            fprintf(stderr,"%s\n",vbd_err);
        }
        if (fp != NULL)
        {
            fprintf(fp,"%s\n",vbd_err);
        }

        return;
    }

    nstlist_orig = ir->nstlist;
    if (env != NULL)
    {
        /* The environment variable overrides the candidate scan below */
        sprintf(buf,"Getting nstlist from environment variable GMX_NSTLIST=%s",env);
        if (MASTER(cr))
        {
            fprintf(stderr,"%s\n",buf);
        }
        if (fp != NULL)
        {
            fprintf(fp,"%s\n",buf);
        }
        sscanf(env,"%d",&ir->nstlist);
    }

    verletbuf_get_list_setup(TRUE,&ls);

    /* Allow rlist to make the list double the size of the cut-off sphere */
    rlist_inc = nbnxn_get_rlist_effective_inc(NBNXN_GPU_CLUSTER_SIZE,mtop->natoms/det(box));
    /* Acceptable and hard-maximum radii from the allowed cost-increase
     * factors; cube root because cost scales with the list volume. */
    rlist_ok  = (max(ir->rvdw,ir->rcoulomb) + rlist_inc)*pow(NBNXN_GPU_LIST_OK_FAC,1.0/3.0) - rlist_inc;
    rlist_max = (max(ir->rvdw,ir->rcoulomb) + rlist_inc)*pow(NBNXN_GPU_LIST_MAX_FAC,1.0/3.0) - rlist_inc;
    if (debug)
    {
        fprintf(debug,"GPU nstlist tuning: rlist_inc %.3f rlist_max %.3f\n",
                rlist_inc,rlist_max);
    }

    i = 0;
    nstlist_prev = nstlist_orig;
    rlist_prev   = ir->rlist;
    do
    {
        if (env == NULL)
        {
            ir->nstlist = nstl[i];
        }

        /* Set the pair-list buffer size in ir */
        calc_verlet_buffer_size(mtop,det(box),ir,ir->verletbuf_drift,&ls,
                                NULL,&rlist_new);

        /* Does rlist fit in the box? */
        bBox = (sqr(rlist_new) < max_cutoff2(ir->ePBC,box));
        bDD  = TRUE;
        if (bBox && DOMAINDECOMP(cr))
        {
            /* Check if rlist fits in the domain decomposition */
            if (inputrec2nboundeddim(ir) < DIM)
            {
                gmx_incons("Changing nstlist with domain decomposition and unbounded dimensions is not implemented yet");
            }
            copy_mat(box,state_tmp.box);
            bDD = change_dd_cutoff(cr,&state_tmp,ir,rlist_new);
        }

        bCont = FALSE;

        if (env == NULL)
        {
            if (bBox && bDD && rlist_new <= rlist_max)
            {
                /* Increase nstlist */
                nstlist_prev = ir->nstlist;
                rlist_prev   = rlist_new;
                /* keep scanning while there are candidates left and we have
                 * not yet reached the "cheap enough" radius */
                bCont = (i+1 < NNSTL && rlist_new < rlist_ok);
            }
            else
            {
                /* Stick with the previous nstlist */
                ir->nstlist = nstlist_prev;
                rlist_new   = rlist_prev;
                bBox = TRUE;
                bDD  = TRUE;
            }
        }

        i++;
    }
    while (bCont);

    if (!bBox || !bDD)
    {
        /* Only reachable with GMX_NSTLIST set: the forced value failed */
        gmx_warning(!bBox ? box_err : dd_err);
        if (fp != NULL)
        {
            fprintf(fp,"\n%s\n",bBox ? box_err : dd_err);
        }
        ir->nstlist = nstlist_orig;
    }
    else if (ir->nstlist != nstlist_orig || rlist_new != ir->rlist)
    {
        sprintf(buf,"Changing nstlist from %d to %d, rlist from %g to %g",
                nstlist_orig,ir->nstlist,
                ir->rlist,rlist_new);
        if (MASTER(cr))
        {
            fprintf(stderr,"%s\n\n",buf);
        }
        if (fp != NULL)
        {
            fprintf(fp,"%s\n\n",buf);
        }
        ir->rlist     = rlist_new;
        ir->rlistlong = rlist_new;
    }
}
/* Prepare the run for the Verlet cut-off scheme: decide whether a GPU (or
 * GPU emulation) will be used, refresh the Verlet buffer size for the
 * current setup, and possibly tune nstlist for GPU runs.
 * Updates *bUseGPU and may update ir->rlist, ir->rlistlong and ir->nstlist.
 */
static void prepare_verlet_scheme(FILE *fplog,
                                  gmx_hw_info_t *hwinfo,
                                  t_commrec *cr,
                                  gmx_hw_opt_t *hw_opt,
                                  const char *nbpu_opt,
                                  t_inputrec *ir,
                                  const gmx_mtop_t *mtop,
                                  matrix box,
                                  gmx_bool *bUseGPU)
{
    /* Here we only check for GPU usage on the MPI master process,
     * as here we don't know how many GPUs we will use yet.
     * We check for a GPU on all processes later.
     */
    *bUseGPU = hwinfo->bCanUseGPU || (getenv("GMX_EMULATE_GPU") != NULL);

    if (ir->verletbuf_drift > 0)
    {
        /* Update the Verlet buffer size for the current run setup */
        verletbuf_list_setup_t setup;
        real                   rlist_updated;

        /* Here we assume CPU acceleration is on. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        verletbuf_get_list_setup(*bUseGPU, &setup);

        calc_verlet_buffer_size(mtop, det(box), ir,
                                ir->verletbuf_drift, &setup,
                                NULL, &rlist_updated);
        if (rlist_updated != ir->rlist)
        {
            if (fplog != NULL)
            {
                fprintf(fplog,"\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_updated,
                        setup.cluster_size_i, setup.cluster_size_j);
            }
            ir->rlist     = rlist_updated;
            ir->rlistlong = rlist_updated;
        }
    }

    /* With GPU or emulation we should check nstlist for performance */
    if (getenv(NSTLIST_ENVVAR) != NULL ||
        (EI_DYNAMICS(ir->eI) &&
         *bUseGPU &&
         ir->nstlist < NSTLIST_GPU_ENOUGH))
    {
        /* Choose a better nstlist */
        increase_nstlist(fplog, cr, ir, mtop, box);
    }
}
/* Convert an input record set up for the group cut-off scheme to the Verlet
 * cut-off scheme, in place. Fails fatally for features the Verlet scheme
 * does not support (unequal cut-offs, user potentials, non-3D pbc,
 * free energy, implicit solvent). Also removes charge groups from mtop.
 *
 * fplog   - log file (may be NULL)
 * ir      - input record, modified in place
 * mtop    - topology, charge groups are removed
 * box_vol - box volume, used for the Verlet buffer calculation
 */
static void convert_to_verlet_scheme(FILE *fplog,
                                     t_inputrec *ir,
                                     gmx_mtop_t *mtop, real box_vol)
{
    /* const added: string literals must not be bound to mutable char* */
    const char *conv_mesg = "Converting input file with group cut-off scheme to the Verlet cut-off scheme";

    md_print_warn(NULL, fplog, "%s\n", conv_mesg);

    ir->cutoff_scheme   = ecutsVERLET;
    ir->verletbuf_drift = 0.005;

    if (ir->rcoulomb != ir->rvdw)
    {
        gmx_fatal(FARGS,"The VdW and Coulomb cut-offs are different, whereas the Verlet scheme only supports equal cut-offs");
    }

    if (ir->vdwtype == evdwUSER || EEL_USER(ir->coulombtype))
    {
        gmx_fatal(FARGS,"User non-bonded potentials are not (yet) supported with the Verlet scheme");
    }
    else if (EVDW_SWITCHED(ir->vdwtype) || EEL_SWITCHED(ir->coulombtype))
    {
        md_print_warn(NULL,fplog,"Converting switched or shifted interactions to a shifted potential (without force shift), this will lead to slightly different interaction potentials");

        if (EVDW_SWITCHED(ir->vdwtype))
        {
            ir->vdwtype = evdwCUT;
        }
        if (EEL_SWITCHED(ir->coulombtype))
        {
            if (EEL_FULL(ir->coulombtype))
            {
                /* With full electrostatic only PME can be switched */
                ir->coulombtype = eelPME;
            }
            else
            {
                md_print_warn(NULL,fplog,"NOTE: Replacing %s electrostatics with reaction-field with epsilon-rf=inf\n",eel_names[ir->coulombtype]);
                ir->coulombtype = eelRF;
                ir->epsilon_rf  = 0.0;
            }
        }

        /* We set the target energy drift to a small number.
         * Note that this is only for testing. For production the user
         * should think about this and set the mdp options.
         */
        ir->verletbuf_drift = 1e-4;
    }

    if (inputrec2nboundeddim(ir) != 3)
    {
        gmx_fatal(FARGS,"Can only convert old tpr files to the Verlet cut-off scheme with 3D pbc");
    }

    if (ir->efep != efepNO || ir->implicit_solvent != eisNO)
    {
        gmx_fatal(FARGS,"Will not convert old tpr files to the Verlet cut-off scheme with free-energy calculations or implicit solvent");
    }

    if (EI_DYNAMICS(ir->eI) && !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        /* Dynamics with temperature coupling: compute a proper buffer */
        verletbuf_list_setup_t ls;

        verletbuf_get_list_setup(FALSE,&ls);
        calc_verlet_buffer_size(mtop,box_vol,ir,ir->verletbuf_drift,&ls,
                                NULL,&ir->rlist);
    }
    else
    {
        /* No buffer estimate possible: disable the drift target and use a
         * fixed 5% margin on the largest cut-off. */
        ir->verletbuf_drift = -1;
        ir->rlist           = 1.05*max(ir->rvdw,ir->rcoulomb);
    }

    gmx_mtop_remove_chargegroups(mtop);
}
/* Check the process affinity mask. If it is non-zero, something
* else has set the affinity, and mdrun should honor that and
* not attempt to do its own thread pinning.
*
* This function should be called twice. Once before the OpenMP
* library gets initialized with bAfterOpenMPInit=FALSE (which will
* detect affinity set by external tools like taskset), and again
* later, after the OpenMP initialization, with bAfterOpenMPInit=TRUE
* (which will detect affinity changes made by the OpenMP library).
*
* Note that this will only work on Linux, because we use a GNU
* feature. */
static void check_cpu_affinity_set(FILE *fplog, const t_commrec *cr,
                                   gmx_hw_opt_t *hw_opt, int ncpus,
                                   gmx_bool bAfterOpenmpInit)
{
#ifdef HAVE_SCHED_GETAFFINITY
    cpu_set_t mask_current;
    int       i, ret;      /* unused locals cpu_count, cpu_set removed */
    gmx_bool  bAllSet;

    assert(hw_opt);
    if (!hw_opt->bThreadPinning)
    {
        /* internal affinity setting is off, don't bother checking process affinity */
        return;
    }

    CPU_ZERO(&mask_current);
    if ((ret = sched_getaffinity(0, sizeof(cpu_set_t), &mask_current)) != 0)
    {
        /* failed to query affinity mask, will just return */
        if (debug)
        {
            fprintf(debug, "Failed to query affinity mask (error %d)", ret);
        }
        return;
    }

    /* Before proceeding with the actual check, make sure that the number of
     * detected CPUs is >= the CPUs in the current set.
     * We need to check for CPU_COUNT as it was added only in glibc 2.6. */
#ifdef CPU_COUNT
    if (ncpus < CPU_COUNT(&mask_current))
    {
        if (debug)
        {
            fprintf(debug, "%d CPUs detected, but %d was returned by CPU_COUNT",
                    ncpus, CPU_COUNT(&mask_current));
        }
        return;
    }
#endif /* CPU_COUNT */

    /* The affinity is considered "default" only when every detected CPU is
     * present in the mask; any missing CPU means an external tool set it. */
    bAllSet = TRUE;
    for (i = 0; (i < ncpus && i < CPU_SETSIZE); i++)
    {
        bAllSet = bAllSet && (CPU_ISSET(i, &mask_current) != 0);
    }

    if (!bAllSet)
    {
        if (!bAfterOpenmpInit)
        {
            md_print_warn(cr, fplog,
                          "%s detected a non-default process affinity, "
                          "so it will not attempt to pin its threads", ShortProgram());
        }
        else
        {
            md_print_warn(cr, fplog,
                          "%s detected a non-default process affinity, "
                          "probably set by the OpenMP library, "
                          "so it will not attempt to pin its threads", ShortProgram());
        }
        /* Honor the external affinity: disable mdrun's own pinning */
        hw_opt->bThreadPinning = FALSE;

        if (debug)
        {
            fprintf(debug, "Non-default affinity mask found, mdrun will not pin threads\n");
        }
    }
    else
    {
        if (debug)
        {
            fprintf(debug, "Default affinity mask found\n");
        }
    }
#endif /* HAVE_SCHED_GETAFFINITY */
}
/* Set CPU affinity. Can be important for performance.
On some systems (e.g. Cray) CPU Affinity is set by default.
But default assigning doesn't work (well) with only some ranks
having threads. This causes very low performance.
External tools have cumbersome syntax for setting affinity
in the case that only some ranks have threads.
Thus it is important that GROMACS sets the affinity internally
if only PME is using threads.
*/
static void set_cpu_affinity(FILE *fplog,
                             const t_commrec *cr,
                             gmx_hw_opt_t *hw_opt,
                             int nthreads_pme,
                             const gmx_hw_info_t *hwinfo,
                             const t_inputrec *inputrec)
{
#if defined GMX_THREAD_MPI
    /* With the number of TMPI threads equal to the number of cores
     * we already pinned in thread-MPI, so don't pin again here.
     */
    if (hw_opt->nthreads_tmpi == tMPI_Thread_get_hw_number())
    {
        return;
    }
#endif

#ifndef __APPLE__
    /* If the tMPI thread affinity setting is not supported encourage the user
     * to report it as it's either a bug or an exotic platform which we might
     * want to support. */
    if (tMPI_Thread_setaffinity_support() != TMPI_SETAFFINITY_SUPPORT_YES)
    {
        /* Fixed user-facing typo: "plarform" -> "platform" */
        md_print_warn(NULL, fplog,
                      "Can not set thread affinities on the current platform. On NUMA systems this\n"
                      "can cause performance degradation. If you think your platform should support\n"
                      "setting affinities, contact the GROMACS developers.");
        return;
    }
#endif /* __APPLE__ */

    if (hw_opt->bThreadPinning)
    {
        /* thread_id removed from this list: it was shared across the OpenMP
         * parallel region below and concurrently written (a data race); it
         * is now declared privately inside the region. */
        int nth_affinity_set, thread_id_node,
            nthread_local, nthread_node, nthread_hw_max, nphyscore;
        int offset;
        char *env;

        /* threads on this MPI process or TMPI thread */
        if (cr->duty & DUTY_PP)
        {
            nthread_local = gmx_omp_nthreads_get(emntNonbonded);
        }
        else
        {
            nthread_local = gmx_omp_nthreads_get(emntPME);
        }

        /* map the current process to cores */
        thread_id_node = 0;
        nthread_node = nthread_local;
#ifdef GMX_MPI
        if (PAR(cr) || MULTISIM(cr))
        {
            /* We need to determine a scan of the thread counts in this
             * compute node.
             */
            MPI_Comm comm_intra;

            MPI_Comm_split(MPI_COMM_WORLD,gmx_hostname_num(),cr->rank_intranode,
                           &comm_intra);
            MPI_Scan(&nthread_local,&thread_id_node,1,MPI_INT,MPI_SUM,comm_intra);
            /* MPI_Scan is inclusive, but here we need exclusive */
            thread_id_node -= nthread_local;
            /* Get the total number of threads on this physical node */
            MPI_Allreduce(&nthread_local,&nthread_node,1,MPI_INT,MPI_SUM,comm_intra);
            MPI_Comm_free(&comm_intra);
        }
#endif

        offset = 0;
        if (hw_opt->core_pinning_offset > 0)
        {
            offset = hw_opt->core_pinning_offset;
            if (SIMMASTER(cr))
            {
                fprintf(stderr, "Applying core pinning offset %d\n", offset);
            }
            if (fplog)
            {
                fprintf(fplog, "Applying core pinning offset %d\n", offset);
            }
        }

        /* With Intel Hyper-Threading enabled, we want to pin consecutive
         * threads to physical cores when using more threads than physical
         * cores or when the user requests so.
         */
        nthread_hw_max = hwinfo->nthreads_hw_avail;
        nphyscore = -1;
        if (hw_opt->bPinHyperthreading ||
            (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
             nthread_node > nthread_hw_max/2 && getenv("GMX_DISABLE_PINHT") == NULL))
        {
            if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) != GMX_CPUID_X86_SMT_ENABLED)
            {
                /* We print to stderr on all processes, as we might have
                 * different settings on different physical nodes.
                 */
                if (gmx_cpuid_vendor(hwinfo->cpuid_info) != GMX_CPUID_VENDOR_INTEL)
                {
                    md_print_warn(NULL, fplog, "Pinning for Hyper-Threading layout requested, "
                                  "but non-Intel CPU detected (vendor: %s)\n",
                                  gmx_cpuid_vendor_string[gmx_cpuid_vendor(hwinfo->cpuid_info)]);
                }
                else
                {
                    md_print_warn(NULL, fplog, "Pinning for Hyper-Threading layout requested, "
                                  "but the CPU detected does not have Intel Hyper-Threading support "
                                  "(or it is turned off)\n");
                }
            }
            nphyscore = nthread_hw_max/2;

            if (SIMMASTER(cr))
            {
                fprintf(stderr, "Pinning to Hyper-Threading cores with %d physical cores in a compute node\n",
                        nphyscore);
            }
            if (fplog)
            {
                fprintf(fplog, "Pinning to Hyper-Threading cores with %d physical cores in a compute node\n",
                        nphyscore);
            }
        }

        /* Set the per-thread affinity. In order to be able to check the success
         * of affinity settings, we will set nth_affinity_set to 1 on threads
         * where the affinity setting succeeded and to 0 where it failed.
         * Reducing these 0/1 values over the threads will give the total number
         * of threads on which we succeeded.
         */
        nth_affinity_set = 0;
#pragma omp parallel firstprivate(thread_id_node) num_threads(nthread_local) \
    reduction(+:nth_affinity_set)
        {
            int      core;
            int      thread_id;     /* private per OpenMP thread (race fix) */
            gmx_bool setaffinity_ret;

            thread_id       = gmx_omp_get_thread_num();
            thread_id_node += thread_id;
            if (nphyscore <= 0)
            {
                core = offset + thread_id_node;
            }
            else
            {
                /* Lock pairs of threads to the same hyperthreaded core */
                core = offset + thread_id_node/2 + (thread_id_node % 2)*nphyscore;
            }

            setaffinity_ret = tMPI_Thread_setaffinity_single(tMPI_Thread_self(), core);

            /* store the per-thread success-values of the setaffinity */
            nth_affinity_set = (setaffinity_ret == 0);

            if (debug)
            {
                fprintf(debug, "On rank %2d, thread %2d, core %2d the affinity setting returned %d\n",
                        cr->nodeid, gmx_omp_get_thread_num(), core, setaffinity_ret);
            }
        }

        if (nth_affinity_set > nthread_local)
        {
            char msg[STRLEN];

            sprintf(msg, "Looks like we have set affinity for more threads than "
                    "we have (%d > %d)!\n", nth_affinity_set, nthread_local);
            gmx_incons(msg);
        }
        else
        {
            /* check & warn if some threads failed to set their affinities */
            if (nth_affinity_set != nthread_local)
            {
                char sbuf1[STRLEN], sbuf2[STRLEN];

                /* sbuf1 contains rank info, while sbuf2 OpenMP thread info */
                sbuf1[0] = sbuf2[0] = '\0';
#ifdef GMX_MPI
#ifdef GMX_THREAD_MPI
                sprintf(sbuf1, "In thread-MPI thread #%d: ", cr->nodeid);
#else           /* GMX_LIB_MPI */
                sprintf(sbuf1, "In MPI process #%d: ", cr->nodeid);
#endif
#endif          /* GMX_MPI */

                if (nthread_local > 1)
                {
                    sprintf(sbuf2, "of %d/%d thread%s ",
                            nthread_local - nth_affinity_set, nthread_local,
                            (nthread_local - nth_affinity_set) > 1 ? "s" : "");
                }

                md_print_warn(NULL, fplog,
                              "NOTE: %sAffinity setting %sfailed.\n"
                              " This can cause performance degradation!",
                              sbuf1, sbuf2);
            }
        }
    }
}
/* Validate the user-supplied threading options and fill in derived values.
 * Fails fatally on inconsistent combinations; modifies hw_opt in place
 * (e.g. deriving nthreads_omp from the total, forcing single-thread setups).
 */
static void check_and_update_hw_opt(gmx_hw_opt_t *hw_opt,
                                    int cutoff_scheme)
{
    gmx_omp_nthreads_read_env(&hw_opt->nthreads_omp);

#ifndef GMX_THREAD_MPI
    if (hw_opt->nthreads_tot > 0)
    {
        gmx_fatal(FARGS,"Setting the total number of threads is only supported with thread-MPI and Gromacs was compiled without thread-MPI");
    }
    if (hw_opt->nthreads_tmpi > 0)
    {
        gmx_fatal(FARGS,"Setting the number of thread-MPI threads is only supported with thread-MPI and Gromacs was compiled without thread-MPI");
    }
#endif

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
    {
        /* We have the same number of OpenMP threads for PP and PME processes,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
        {
            gmx_fatal(FARGS,"The total number of threads requested (%d) does not match the thread-MPI threads (%d) times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot,hw_opt->nthreads_tmpi,hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
        {
            gmx_fatal(FARGS,"The total number of threads requested (%d) is not divisible by the number of thread-MPI threads requested (%d)",
                      hw_opt->nthreads_tot,hw_opt->nthreads_tmpi);
        }

        if (hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
        {
            gmx_fatal(FARGS,"The total number of threads requested (%d) is not divisible by the number of OpenMP threads requested (%d)",
                      hw_opt->nthreads_tot,hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp <= 0)
        {
            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
        }
    }

#ifndef GMX_OPENMP
    if (hw_opt->nthreads_omp > 1)
    {
        gmx_fatal(FARGS,"OpenMP threads are requested, but Gromacs was compiled without OpenMP support");
    }
#endif

    if (cutoff_scheme == ecutsGROUP)
    {
        /* We only have OpenMP support for PME only nodes */
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS,"OpenMP threads have been requested with cut-off scheme %s, but these are only supported with cut-off scheme %s",
                      ecutscheme_names[cutoff_scheme],
                      ecutscheme_names[ecutsVERLET]);
        }
        hw_opt->nthreads_omp = 1;
    }

    if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
    {
        gmx_fatal(FARGS,"You need to specify -ntomp in addition to -ntomp_pme");
    }

    if (hw_opt->nthreads_tot == 1)
    {
        hw_opt->nthreads_tmpi = 1;

        if (hw_opt->nthreads_omp > 1)
        {
            /* Fixed: report the requested OpenMP thread count
             * (nthreads_omp), not nthreads_tmpi which was just set to 1. */
            gmx_fatal(FARGS,"You requested %d OpenMP threads with %d total threads",
                      hw_opt->nthreads_omp,hw_opt->nthreads_tot);
        }
        hw_opt->nthreads_omp = 1;
    }

    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    if (debug)
    {
        fprintf(debug,"hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s'\n",
                hw_opt->nthreads_tot,
                hw_opt->nthreads_tmpi,
                hw_opt->nthreads_omp,
                hw_opt->nthreads_omp_pme,
                hw_opt->gpu_id!=NULL ? hw_opt->gpu_id : "");
    }
}
/* Override the value in inputrec with value passed on the command line (if any) */
/* Override the value in inputrec with value passed on the command line
 * (if any). nsteps_cmdline == -2 is the "not set" sentinel; any larger
 * value replaces ir->nsteps and a note is printed.
 */
static void override_nsteps_cmdline(FILE *fplog,
                                    int nsteps_cmdline,
                                    t_inputrec *ir,
                                    const t_commrec *cr)
{
    char msg[STRLEN];

    assert(ir);
    assert(cr);

    /* -2 (and below) means "use the value from the tpr file" */
    if (nsteps_cmdline <= -2)
    {
        return;
    }

    ir->nsteps = nsteps_cmdline;

    if (EI_DYNAMICS(ir->eI))
    {
        /* For dynamics also report the resulting simulated time */
        sprintf(msg, "Overriding nsteps with value passed on the command line: %d steps, %.3f ps",
                nsteps_cmdline, nsteps_cmdline*ir->delta_t);
    }
    else
    {
        sprintf(msg, "Overriding nsteps with value passed on the command line: %d steps",
                nsteps_cmdline);
    }

    md_print_warn(cr, fplog, "%s\n", msg);
}
/* Data structure set by SIMMASTER which needs to be passed to all nodes
 * before the other nodes have read the tpx file and called gmx_detect_hardware.
 */
typedef struct {
    int      cutoff_scheme; /* The cutoff scheme from inputrec_t             */
    gmx_bool bUseGPU;       /* Use GPU or GPU emulation for the non-bondeds  */
} master_inf_t;
int mdrunner(gmx_hw_opt_t *hw_opt,
FILE *fplog,t_commrec *cr,int nfile,
const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
gmx_bool bCompact, int nstglobalcomm,
ivec ddxyz,int dd_node_order,real rdd,real rconstr,
const char *dddlb_opt,real dlb_scale,
const char *ddcsx,const char *ddcsy,const char *ddcsz,
const char *nbpu_opt,
int nsteps_cmdline, int nstepout,int resetstep,
int nmultisim,int repl_ex_nst,int repl_ex_nex,
int repl_ex_seed, real pforce,real cpt_period,real max_hours,
const char *deviceOptions, unsigned long Flags)
{
gmx_bool bForceUseGPU,bTryUseGPU;
double nodetime=0,realtime;
t_inputrec *inputrec;
t_state *state=NULL;
matrix box;
gmx_ddbox_t ddbox={0};
int npme_major,npme_minor;
real tmpr1,tmpr2;
t_nrnb *nrnb;
gmx_mtop_t *mtop=NULL;
t_mdatoms *mdatoms=NULL;
t_forcerec *fr=NULL;
t_fcdata *fcd=NULL;
real ewaldcoeff=0;
gmx_pme_t *pmedata=NULL;
gmx_vsite_t *vsite=NULL;
gmx_constr_t constr;
int i,m,nChargePerturbed=-1,status,nalloc;
char *gro;
gmx_wallcycle_t wcycle;
gmx_bool bReadRNG,bReadEkin;
int list;
gmx_runtime_t runtime;
int rc;
gmx_large_int_t reset_counters;
gmx_edsam_t ed=NULL;
t_commrec *cr_old=cr;
int nthreads_pme=1;
int nthreads_pp=1;
gmx_membed_t membed=NULL;
gmx_hw_info_t *hwinfo=NULL;
master_inf_t minf={-1,FALSE};
/* CAUTION: threads may be started later on in this function, so
cr doesn't reflect the final parallel state right now */
snew(inputrec,1);
snew(mtop,1);
if (Flags & MD_APPENDFILES)
{
fplog = NULL;
}
bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
bTryUseGPU = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;
snew(state,1);
if (SIMMASTER(cr))
{
/* Read (nearly) all data required for the simulation */
read_tpx_state(ftp2fn(efTPX,nfile,fnm),inputrec,state,NULL,mtop);
if (inputrec->cutoff_scheme != ecutsVERLET &&
((Flags & MD_TESTVERLET) || getenv("GMX_VERLET_SCHEME") != NULL))
{
convert_to_verlet_scheme(fplog,inputrec,mtop,det(state->box));
}
/* Detect hardware, gather information. With tMPI only thread 0 does it
* and after threads are started broadcasts hwinfo around. */
snew(hwinfo, 1);
gmx_detect_hardware(fplog, hwinfo, cr,
bForceUseGPU, bTryUseGPU, hw_opt->gpu_id);
minf.cutoff_scheme = inputrec->cutoff_scheme;
minf.bUseGPU = FALSE;
if (inputrec->cutoff_scheme == ecutsVERLET)
{
prepare_verlet_scheme(fplog,hwinfo,cr,hw_opt,nbpu_opt,
inputrec,mtop,state->box,
&minf.bUseGPU);
}
else if (hwinfo->bCanUseGPU)
{
md_print_warn(cr,fplog,
"NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
" To use a GPU, set the mdp option: cutoff-scheme = Verlet\n"
" (for quick performance testing you can use the -testverlet option)\n");
if (bForceUseGPU)
{
gmx_fatal(FARGS,"GPU requested, but can't be used without cutoff-scheme=Verlet");
}
}
}
#ifndef GMX_THREAD_MPI
if (PAR(cr))
{
gmx_bcast_sim(sizeof(minf),&minf,cr);
}
#endif
if (minf.bUseGPU && cr->npmenodes == -1)
{
/* Don't automatically use PME-only nodes with GPUs */
cr->npmenodes = 0;
}
/* Check for externally set OpenMP affinity and turn off internal
* pinning if any is found. We need to do this check early to tell
* thread-MPI whether it should do pinning when spawning threads.
*/
gmx_omp_check_thread_affinity(fplog, cr, hw_opt);
#ifdef GMX_THREAD_MPI
/* With thread-MPI inputrec is only set here on the master thread */
if (SIMMASTER(cr))
#endif
{
check_and_update_hw_opt(hw_opt,minf.cutoff_scheme);
#ifdef GMX_THREAD_MPI
/* Early check for externally set process affinity. Can't do over all
* MPI processes because hwinfo is not available everywhere, but with
* thread-MPI it's needed as pinning might get turned off which needs
* to be known before starting thread-MPI. */
check_cpu_affinity_set(fplog,
NULL,
hw_opt, hwinfo->nthreads_hw_avail, FALSE);
#endif
#ifdef GMX_THREAD_MPI
if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
{
gmx_fatal(FARGS,"You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME nodes");
}
#endif
if (hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
cr->npmenodes <= 0)
{
gmx_fatal(FARGS,"You need to explicitly specify the number of PME nodes (-npme) when using different number of OpenMP threads for PP and PME nodes");
}
}
#ifdef GMX_THREAD_MPI
if (SIMMASTER(cr))
{
/* NOW the threads will be started: */
hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,
hw_opt,
inputrec, mtop,
cr, fplog);
if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
{
hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
}
if (hw_opt->nthreads_tmpi > 1)
{
/* now start the threads. */
cr=mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
oenv, bVerbose, bCompact, nstglobalcomm,
ddxyz, dd_node_order, rdd, rconstr,
dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
nbpu_opt,
nsteps_cmdline, nstepout, resetstep, nmultisim,
repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
cpt_period, max_hours, deviceOptions,
Flags);
/* the main thread continues here with a new cr. We don't deallocate
the old cr because other threads may still be reading it. */
if (cr == NULL)
{
gmx_comm("Failed to spawn threads");
}
}
}
#endif
/* END OF CAUTION: cr is now reliable */
/* g_membed initialisation *
* Because we change the mtop, init_membed is called before the init_parallel *
* (in case we ever want to make it run in parallel) */
if (opt2bSet("-membed",nfile,fnm))
{
if (MASTER(cr))
{
fprintf(stderr,"Initializing membed");
}
membed = init_membed(fplog,nfile,fnm,mtop,inputrec,state,cr,&cpt_period);
}
if (PAR(cr))
{
/* now broadcast everything to the non-master nodes/threads: */
init_parallel(fplog, cr, inputrec, mtop);
/* This check needs to happen after get_nthreads_mpi() */
if (inputrec->cutoff_scheme == ecutsVERLET && (Flags & MD_PARTDEC))
{
gmx_fatal_collective(FARGS,cr,NULL,
"The Verlet cut-off scheme is not supported with particle decomposition.\n"
"You can achieve the same effect as particle decomposition by running in parallel using only OpenMP threads.");
}
}
if (fplog != NULL)
{
pr_inputrec(fplog,0,"Input Parameters",inputrec,FALSE);
}
#if defined GMX_THREAD_MPI
/* With tMPI we detected on thread 0 and we'll just pass the hwinfo pointer
* to the other threads -- slightly uncool, but works fine, just need to
* make sure that the data doesn't get freed twice. */
if (cr->nnodes > 1)
{
if (!SIMMASTER(cr))
{
snew(hwinfo, 1);
}
gmx_bcast(sizeof(&hwinfo), &hwinfo, cr);
}
#else
if (PAR(cr) && !SIMMASTER(cr))
{
/* now we have inputrec on all nodes, can run the detection */
/* TODO: perhaps it's better to propagate within a node instead? */
snew(hwinfo, 1);
gmx_detect_hardware(fplog, hwinfo, cr,
bForceUseGPU, bTryUseGPU, hw_opt->gpu_id);
}
/* Now do the affinity check with MPI/no-MPI (done earlier with thread-MPI). */
check_cpu_affinity_set(fplog, cr,
hw_opt, hwinfo->nthreads_hw_avail, FALSE);
#endif
/* now make sure the state is initialized and propagated */
set_state_entries(state,inputrec,cr->nnodes);
/* remove when vv and rerun works correctly! */
if (PAR(cr) && EI_VV(inputrec->eI) && ((Flags & MD_RERUN) || (Flags & MD_RERUN_VSITE)))
{
gmx_fatal(FARGS,
"Currently can't do velocity verlet with rerun in parallel.");
}
/* A parallel command line option consistency check that we can
only do after any threads have started. */
if (!PAR(cr) &&
(ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
{
gmx_fatal(FARGS,
"The -dd or -npme option request a parallel simulation, "
#ifndef GMX_MPI
"but %s was compiled without threads or MPI enabled"
#else
#ifdef GMX_THREAD_MPI
"but the number of threads (option -nt) is 1"
#else
"but %s was not started through mpirun/mpiexec or only one process was requested through mpirun/mpiexec"
#endif
#endif
, ShortProgram()
);
}
if ((Flags & MD_RERUN) &&
(EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
{
gmx_fatal(FARGS, "The .mdp file specified an energy mininization or normal mode algorithm, and these are not compatible with mdrun -rerun");
}
if (can_use_allvsall(inputrec,mtop,TRUE,cr,fplog) && PAR(cr))
{
/* All-vs-all loops do not work with domain decomposition */
Flags |= MD_PARTDEC;
}
if (!EEL_PME(inputrec->coulombtype) || (Flags & MD_PARTDEC))
{
if (cr->npmenodes > 0)
{
if (!EEL_PME(inputrec->coulombtype))
{
gmx_fatal_collective(FARGS,cr,NULL,
"PME nodes are requested, but the system does not use PME electrostatics");
}
if (Flags & MD_PARTDEC)
{
gmx_fatal_collective(FARGS,cr,NULL,
"PME nodes are requested, but particle decomposition does not support separate PME nodes");
}
}
cr->npmenodes = 0;
}
#ifdef GMX_FAHCORE
fcRegisterSteps(inputrec->nsteps,inputrec->init_step);
#endif
/* NMR restraints must be initialized before load_checkpoint,
* since with time averaging the history is added to t_state.
* For proper consistency check we therefore need to extend
* t_state here.
* So the PME-only nodes (if present) will also initialize
* the distance restraints.
*/
snew(fcd,1);
/* This needs to be called before read_checkpoint to extend the state */
init_disres(fplog,mtop,inputrec,cr,Flags & MD_PARTDEC,fcd,state);
if (gmx_mtop_ftype_count(mtop,F_ORIRES) > 0)
{
if (PAR(cr) && !(Flags & MD_PARTDEC))
{
gmx_fatal(FARGS,"Orientation restraints do not work (yet) with domain decomposition, use particle decomposition (mdrun option -pd)");
}
/* Orientation restraints */
if (MASTER(cr))
{
init_orires(fplog,mtop,state->x,inputrec,cr->ms,&(fcd->orires),
state);
}
}
if (DEFORM(*inputrec))
{
/* Store the deform reference box before reading the checkpoint */
if (SIMMASTER(cr))
{
copy_mat(state->box,box);
}
if (PAR(cr))
{
gmx_bcast(sizeof(box),box,cr);
}
/* Because we do not have the update struct available yet
* in which the reference values should be stored,
* we store them temporarily in static variables.
* This should be thread safe, since they are only written once
* and with identical values.
*/
#ifdef GMX_THREAD_MPI
tMPI_Thread_mutex_lock(&deform_init_box_mutex);
#endif
deform_init_init_step_tpx = inputrec->init_step;
copy_mat(box,deform_init_box_tpx);
#ifdef GMX_THREAD_MPI
tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
#endif
}
if (opt2bSet("-cpi",nfile,fnm))
{
/* Check if checkpoint file exists before doing continuation.
* This way we can use identical input options for the first and subsequent runs...
*/
if( gmx_fexist_master(opt2fn_master("-cpi",nfile,fnm,cr),cr) )
{
load_checkpoint(opt2fn_master("-cpi",nfile,fnm,cr),&fplog,
cr,Flags & MD_PARTDEC,ddxyz,
inputrec,state,&bReadRNG,&bReadEkin,
(Flags & MD_APPENDFILES),
(Flags & MD_APPENDFILESSET));
if (bReadRNG)
{
Flags |= MD_READ_RNG;
}
if (bReadEkin)
{
Flags |= MD_READ_EKIN;
}
}
}
if (((MASTER(cr) || (Flags & MD_SEPPOT)) && (Flags & MD_APPENDFILES))
#ifdef GMX_THREAD_MPI
/* With thread MPI only the master node/thread exists in mdrun.c,
* therefore non-master nodes need to open the "seppot" log file here.
*/
|| (!MASTER(cr) && (Flags & MD_SEPPOT))
#endif
)
{
gmx_log_open(ftp2fn(efLOG,nfile,fnm),cr,!(Flags & MD_SEPPOT),
Flags,&fplog);
}
/* override nsteps with value from cmdline */
override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);
if (SIMMASTER(cr))
{
copy_mat(state->box,box);
}
if (PAR(cr))
{
gmx_bcast(sizeof(box),box,cr);
}
/* Essential dynamics */
if (opt2bSet("-ei",nfile,fnm))
{
/* Open input and output files, allocate space for ED data structure */
ed = ed_open(nfile,fnm,Flags,cr);
}
if (PAR(cr) && !((Flags & MD_PARTDEC) ||
EI_TPI(inputrec->eI) ||
inputrec->eI == eiNM))
{
cr->dd = init_domain_decomposition(fplog,cr,Flags,ddxyz,rdd,rconstr,
dddlb_opt,dlb_scale,
ddcsx,ddcsy,ddcsz,
mtop,inputrec,
box,state->x,
&ddbox,&npme_major,&npme_minor);
make_dd_communicators(fplog,cr,dd_node_order);
/* Set overallocation to avoid frequent reallocation of arrays */
set_over_alloc_dd(TRUE);
}
else
{
/* PME, if used, is done on all nodes with 1D decomposition */
cr->npmenodes = 0;
cr->duty = (DUTY_PP | DUTY_PME);
npme_major = 1;
npme_minor = 1;
if (!EI_TPI(inputrec->eI))
{
npme_major = cr->nnodes;
}
if (inputrec->ePBC == epbcSCREW)
{
gmx_fatal(FARGS,
"pbc=%s is only implemented with domain decomposition",
epbc_names[inputrec->ePBC]);
}
}
if (PAR(cr))
{
/* After possible communicator splitting in make_dd_communicators.
* we can set up the intra/inter node communication.
*/
gmx_setup_nodecomm(fplog,cr);
}
/* Initialize per-physical-node MPI process/thread ID and counters. */
gmx_init_intranode_counters(cr);
#ifdef GMX_MPI
md_print_info(cr,fplog,"Using %d MPI %s\n",
cr->nnodes,
#ifdef GMX_THREAD_MPI
cr->nnodes==1 ? "thread" : "threads"
#else
cr->nnodes==1 ? "process" : "processes"
#endif
);
fflush(stderr);
#endif
gmx_omp_nthreads_init(fplog, cr,
hwinfo->nthreads_hw_avail,
hw_opt->nthreads_omp,
hw_opt->nthreads_omp_pme,
(cr->duty & DUTY_PP) == 0,
inputrec->cutoff_scheme == ecutsVERLET);
gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt->nthreads_tmpi, minf.bUseGPU);
/* getting number of PP/PME threads
PME: env variable should be read only on one node to make sure it is
identical everywhere;
*/
/* TODO nthreads_pp is only used for pinning threads.
* This is a temporary solution until we have a hw topology library.
*/
nthreads_pp = gmx_omp_nthreads_get(emntNonbonded);
nthreads_pme = gmx_omp_nthreads_get(emntPME);
wcycle = wallcycle_init(fplog,resetstep,cr,nthreads_pp,nthreads_pme);
if (PAR(cr))
{
/* Master synchronizes its value of reset_counters with all nodes
* including PME only nodes */
reset_counters = wcycle_get_reset_counters(wcycle);
gmx_bcast_sim(sizeof(reset_counters),&reset_counters,cr);
wcycle_set_reset_counters(wcycle, reset_counters);
}
snew(nrnb,1);
if (cr->duty & DUTY_PP)
{
/* For domain decomposition we allocate dynamically
* in dd_partition_system.
*/
if (DOMAINDECOMP(cr))
{
bcast_state_setup(cr,state);
}
else
{
if (PAR(cr))
{
bcast_state(cr,state,TRUE);
}
}
/* Initiate forcerecord */
fr = mk_forcerec();
fr->hwinfo = hwinfo;
init_forcerec(fplog,oenv,fr,fcd,inputrec,mtop,cr,box,FALSE,
opt2fn("-table",nfile,fnm),
opt2fn("-tabletf",nfile,fnm),
opt2fn("-tablep",nfile,fnm),
opt2fn("-tableb",nfile,fnm),
nbpu_opt,
FALSE,pforce);
/* version for PCA_NOT_READ_NODE (see md.c) */
/*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
"nofile","nofile","nofile","nofile",FALSE,pforce);
*/
fr->bSepDVDL = ((Flags & MD_SEPPOT) == MD_SEPPOT);
/* Initialize QM-MM */
if(fr->bQMMM)
{
init_QMMMrec(cr,box,mtop,inputrec,fr);
}
/* Initialize the mdatoms structure.
* mdatoms is not filled with atom data,
* as this can not be done now with domain decomposition.
*/
mdatoms = init_mdatoms(fplog,mtop,inputrec->efep!=efepNO);
/* Initialize the virtual site communication */
vsite = init_vsite(mtop,cr,FALSE);
calc_shifts(box,fr->shift_vec);
/* With periodic molecules the charge groups should be whole at start up
* and the virtual sites should not be far from their proper positions.
*/
if (!inputrec->bContinuation && MASTER(cr) &&
!(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
{
/* Make molecules whole at start of run */
if (fr->ePBC != epbcNONE)
{
do_pbc_first_mtop(fplog,inputrec->ePBC,box,mtop,state->x);
}
if (vsite)
{
/* Correct initial vsite positions are required
* for the initial distribution in the domain decomposition
* and for the initial shell prediction.
*/
construct_vsites_mtop(fplog,vsite,mtop,state->x);
}
}
if (EEL_PME(fr->eeltype))
{
ewaldcoeff = fr->ewaldcoeff;
pmedata = &fr->pmedata;
}
else
{
pmedata = NULL;
}
}
else
{
/* This is a PME only node */
/* We don't need the state */
done_state(state);
ewaldcoeff = calc_ewaldcoeff(inputrec->rcoulomb, inputrec->ewald_rtol);
snew(pmedata,1);
}
/* Before setting affinity, check whether the affinity has changed
* - which indicates that probably the OpenMP library has changed it since
* we first checked). */
check_cpu_affinity_set(fplog, cr, hw_opt, hwinfo->nthreads_hw_avail, TRUE);
/* Set the CPU affinity */
set_cpu_affinity(fplog,cr,hw_opt,nthreads_pme,hwinfo,inputrec);
/* Initiate PME if necessary,
* either on all nodes or on dedicated PME nodes only. */
if (EEL_PME(inputrec->coulombtype))
{
if (mdatoms)
{
nChargePerturbed = mdatoms->nChargePerturbed;
}
if (cr->npmenodes > 0)
{
/* The PME only nodes need to know nChargePerturbed */
gmx_bcast_sim(sizeof(nChargePerturbed),&nChargePerturbed,cr);
}
if (cr->duty & DUTY_PME)
{
status = gmx_pme_init(pmedata,cr,npme_major,npme_minor,inputrec,
mtop ? mtop->natoms : 0,nChargePerturbed,
(Flags & MD_REPRODUCIBLE),nthreads_pme);
if (status != 0)
{
gmx_fatal(FARGS,"Error %d initializing PME",status);
}
}
}
if (integrator[inputrec->eI].func == do_md
||
integrator[inputrec->eI].func == do_md_openmm
)
{
/* Turn on signal handling on all nodes */
/*
* (A user signal from the PME nodes (if any)
* is communicated to the PP nodes.
*/
signal_handler_install();
}
if (cr->duty & DUTY_PP)
{
if (inputrec->ePull != epullNO)
{
/* Initialize pull code */
init_pull(fplog,inputrec,nfile,fnm,mtop,cr,oenv, inputrec->fepvals->init_lambda,
EI_DYNAMICS(inputrec->eI) && MASTER(cr),Flags);
}
if (inputrec->bRot)
{
/* Initialize enforced rotation code */
init_rot(fplog,inputrec,nfile,fnm,cr,state->x,box,mtop,oenv,
bVerbose,Flags);
}
constr = init_constraints(fplog,mtop,inputrec,ed,state,cr);
if (DOMAINDECOMP(cr))
{
dd_init_bondeds(fplog,cr->dd,mtop,vsite,constr,inputrec,
Flags & MD_DDBONDCHECK,fr->cginfo_mb);
set_dd_parameters(fplog,cr->dd,dlb_scale,inputrec,fr,&ddbox);
setup_dd_grid(fplog,cr->dd);
}
/* Now do whatever the user wants us to do (how flexible...) */
integrator[inputrec->eI].func(fplog,cr,nfile,fnm,
oenv,bVerbose,bCompact,
nstglobalcomm,
vsite,constr,
nstepout,inputrec,mtop,
fcd,state,
mdatoms,nrnb,wcycle,ed,fr,
repl_ex_nst,repl_ex_nex,repl_ex_seed,
membed,
cpt_period,max_hours,
deviceOptions,
Flags,
&runtime);
if (inputrec->ePull != epullNO)
{
finish_pull(fplog,inputrec->pull);
}
if (inputrec->bRot)
{
finish_rot(inputrec->rot);
}
}
else
{
/* do PME only */
gmx_pmeonly(*pmedata,cr,nrnb,wcycle,ewaldcoeff,FALSE,inputrec);
}
if (EI_DYNAMICS(inputrec->eI) || EI_TPI(inputrec->eI))
{
/* Some timing stats */
if (SIMMASTER(cr))
{
if (runtime.proc == 0)
{
runtime.proc = runtime.real;
}
}
else
{
runtime.real = 0;
}
}
wallcycle_stop(wcycle,ewcRUN);
/* Finish up, write some stuff
* if rerunMD, don't write last frame again
*/
finish_run(fplog,cr,ftp2fn(efSTO,nfile,fnm),
inputrec,nrnb,wcycle,&runtime,
fr != NULL && fr->nbv != NULL && fr->nbv->bUseGPU ?
nbnxn_cuda_get_timings(fr->nbv->cu_nbv) : NULL,
nthreads_pp,
EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));
if ((cr->duty & DUTY_PP) && fr->nbv != NULL && fr->nbv->bUseGPU)
{
char gpu_err_str[STRLEN];
/* free GPU memory and uninitialize GPU (by destroying the context) */
nbnxn_cuda_free(fplog, fr->nbv->cu_nbv);
if (!free_gpu(gpu_err_str))
{
gmx_warning("On node %d failed to free GPU #%d: %s",
cr->nodeid, get_current_gpu_device_id(), gpu_err_str);
}
}
if (opt2bSet("-membed",nfile,fnm))
{
sfree(membed);
}
#ifdef GMX_THREAD_MPI
if (PAR(cr) && SIMMASTER(cr))
#endif
{
gmx_hardware_info_free(hwinfo);
}
/* Does what it says */
print_date_and_time(fplog,cr->nodeid,"Finished mdrun",&runtime);
/* Close logfile already here if we were appending to it */
if (MASTER(cr) && (Flags & MD_APPENDFILES))
{
gmx_log_close(fplog);
}
rc=(int)gmx_get_stop_condition();
#ifdef GMX_THREAD_MPI
/* we need to join all threads. The sub-threads join when they
exit this function, but the master thread needs to be told to
wait for that. */
if (PAR(cr) && MASTER(cr))
{
tMPI_Finalize();
}
#endif
return rc;
}
|
tinyexr.h | /*
Copyright (c) 2014 - 2015, Syoyo Fujita
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __TINYEXR_H__
#define __TINYEXR_H__
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#ifdef __cplusplus
extern "C" {
#endif
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_ATTRIBUTES (128)
// One custom EXR header attribute, stored as a raw (name, type, value) triple.
typedef struct _EXRAttribute {
// Attribute name string.
char *name;
// Attribute type name string (interpretation of `value` depends on it).
char *type;
// Size in bytes of the data pointed to by `value`.
int size;
// Raw attribute payload of `size` bytes; not interpreted by this struct.
unsigned char *value; // uint8_t*
} EXRAttribute;
// Single-part (non-deep) EXR image: per-channel pixel planes plus header fields.
typedef struct _EXRImage {
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.).
EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];
// Number of valid entries in `custom_attributes`.
int num_custom_attributes;
// Number of channels (length of the per-channel arrays below).
int num_channels;
// Channel name strings, one per channel.
const char **channel_names;
unsigned char **images; // image[channels][pixels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
// Image width in pixels.
int width;
// Image height in pixels.
int height;
// EXR `pixelAspectRatio` header attribute.
float pixel_aspect_ratio;
// EXR `lineOrder` header attribute (scanline storage order).
int line_order;
// EXR `dataWindow` header attribute (4 ints; presumably xmin, ymin,
// xmax, ymax — confirm against the parser).
int data_window[4];
// EXR `displayWindow` header attribute (same layout as `data_window`).
int display_window[4];
// EXR `screenWindowCenter` header attribute.
float screen_window_center[2];
// EXR `screenWindowWidth` header attribute.
float screen_window_width;
} EXRImage;
// Deep EXR image: a variable number of samples per pixel, organized per
// channel and per scanline.
typedef struct _DeepImage {
// Number of channels (length of `channel_names` and first axis of `image`).
int num_channels;
// Channel name strings, one per channel.
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
// Image width in pixels.
int width;
// Image height in pixels.
int height;
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Return 0 if success
// Returns error string in `err` when there's an error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Parse single-frame OpenEXR header from a file and initialize `EXRImage`
// struct.
// Users then call LoadMultiChannelEXRFromFile to actually load image data into
// `EXRImage`
extern int ParseMultiChannelEXRHeaderFromFile(EXRImage *image,
const char *filename,
const char **err);
// Parse single-frame OpenEXR header from a memory and initialize `EXRImage`
// struct.
// Users then call LoadMultiChannelEXRFromMemory to actually load image data
// into `EXRImage`
extern int ParseMultiChannelEXRHeaderFromMemory(EXRImage *image,
const unsigned char *memory,
const char **err);
// Loads multi-channel, single-frame OpenEXR image from a file.
// Application must setup `ParseMultiChannelEXRHeaderFromFile` before calling
// `LoadMultiChannelEXRFromFile`.
// Application can free EXRImage using `FreeEXRImage`
// Return 0 if success
// Returns error string in `err` when there's an error
extern int LoadMultiChannelEXRFromFile(EXRImage *image, const char *filename,
const char **err);
// Loads multi-channel, single-frame OpenEXR image from a memory.
// Application must setup `EXRImage` with `ParseMultiChannelEXRHeaderFromMemory`
// before calling `LoadMultiChannelEXRFromMemory`.
// Application can free EXRImage using `FreeEXRImage`
// Return 0 if success
// Returns error string in `err` when there's an error
extern int LoadMultiChannelEXRFromMemory(EXRImage *image,
const unsigned char *memory,
const char **err);
// Saves floating point RGBA image as OpenEXR.
// Image is compressed with ZIP.
// Return 0 if success
// Returns error string in `err` when there's an error
// extern int SaveEXR(const float *in_rgba, int width, int height,
// const char *filename, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Application must free EXRImage
// Returns 0 if success
// Returns error string in `err` when there's an error
extern int SaveMultiChannelEXRToFile(const EXRImage *image,
const char *filename, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Application must free EXRImage
// Returns the number of bytes written if success.
// Returns a negative number when failed.
// Returns error string in `err` when there's an error
extern size_t SaveMultiChannelEXRToMemory(const EXRImage *image,
unsigned char **memory,
const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns 0 if success
// Returns error string in `err` when there's an error
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Return 0 if success
// Returns error string in `err` when there's an error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// Initialize of EXRImage struct
extern void InitEXRImage(EXRImage *exrImage);
// Frees internal data of EXRImage struct
// Returns 0 if success.
extern int FreeEXRImage(EXRImage *exrImage);
// For emscripten.
// Parse single-frame OpenEXR header from memory.
// Return 0 if success
extern int ParseEXRHeaderFromMemory(EXRAttribute* customAttributes, int *numCustomAttributes, int *width, int *height,
const unsigned char *memory);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// `out_rgba` must have enough memory(at least sizeof(float) x 4(RGBA) x width x
// height)
// Return 0 if success
// Returns error string in `err` when there's an error
extern int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory,
const char **err);
#ifdef __cplusplus
}
#endif
#ifdef TINYEXR_IMPLEMENTATION
#include <cstdio>
#include <cstdlib>
#include <cassert>
#include <cstring>
#include <algorithm>
#include <string>
#include <vector>
#include "tinyexr.h"
#ifdef _OPENMP
#include <omp.h>
#endif
namespace {
namespace miniz {
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's where designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
//#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
//#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies (zlib-compatible values, passed to mz_deflateInit2()).
enum {
  MZ_DEFAULT_STRATEGY = 0, // normal LZ matching + Huffman coding
  MZ_FILTERED = 1,         // for data produced by a filter/predictor (see zlib docs)
  MZ_HUFFMAN_ONLY = 2,     // Huffman coding only, no string matching
  MZ_RLE = 3,              // limit match distances to 1 (run-length encoding)
  MZ_FIXED = 4             // static Huffman codes only
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,      // no forced flush; compress only when beneficial
  MZ_PARTIAL_FLUSH = 1, // advanced use; see zlib docs
  MZ_SYNC_FLUSH = 2,    // flush all pending output, byte-align the stream
  MZ_FULL_FLUSH = 3,    // advanced use; see zlib docs
  MZ_FINISH = 4,        // all input has been supplied; finish the stream
  MZ_BLOCK = 5          // advanced use; see zlib docs
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,              // success
  MZ_STREAM_END = 1,      // all input consumed and all output written
  MZ_NEED_DICT = 2,       // preset dictionary needed (zlib compat; see zlib docs)
  MZ_ERRNO = -1,          // file I/O error (see errno)
  MZ_STREAM_ERROR = -2,   // the stream state is inconsistent/bogus
  MZ_DATA_ERROR = -3,     // input data is corrupt or invalid
  MZ_MEM_ERROR = -4,      // out of memory
  MZ_BUF_ERROR = -5,      // no forward progress possible (buffers empty/full)
  MZ_VERSION_ERROR = -6,  // incompatible version (zlib compat)
  MZ_PARAM_ERROR = -10000 // invalid parameter (miniz-specific)
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
  MZ_NO_COMPRESSION = 0,      // stored (raw) blocks only
  MZ_BEST_SPEED = 1,          // fastest compression
  MZ_BEST_COMPRESSION = 9,    // best zlib-compatible ratio
  MZ_UBER_COMPRESSION = 10,   // best possible ratio; NOT zlib compatible
  MZ_DEFAULT_LEVEL = 6,       // value MZ_DEFAULT_COMPRESSION resolves to
  MZ_DEFAULT_COMPRESSION = -1 // use MZ_DEFAULT_LEVEL
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct, mirroring zlib's z_stream.
// The caller owns and updates the next_in/avail_in and next_out/avail_out
// pairs between calls to mz_deflate()/mz_inflate().
typedef struct mz_stream_s {
  const unsigned char *next_in; // pointer to next byte to read
  unsigned int avail_in;        // number of bytes available at next_in
  mz_ulong total_in;            // total number of bytes consumed so far
  unsigned char *next_out;      // pointer to next byte to write
  unsigned int avail_out;       // number of bytes that can be written to next_out
  mz_ulong total_out;           // total number of bytes produced so far
  char *msg;                    // error msg (unused)
  struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
  mz_alloc_func
      zalloc;          // optional heap allocation function (defaults to malloc)
  mz_free_func zfree;  // optional heap free function (defaults to free)
  void *opaque;        // heap alloc function user pointer
  int data_type;       // data_type (unused)
  mz_ulong adler;      // adler32 of the source or uncompressed data
  mz_ulong reserved;   // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
// Fixed limits used by the ZIP reader/writer.
enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,        // size of internal file I/O buffers
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,    // max supported filename length
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 // max supported per-file comment length
};
// Detailed information about one archive entry, filled in by
// mz_zip_reader_file_stat().
typedef struct {
  mz_uint32 m_file_index;       // index of this entry within the archive
  mz_uint32 m_central_dir_ofs;  // byte offset of the entry's central dir record
  mz_uint16 m_version_made_by;  // "version made by" field of the record
  mz_uint16 m_version_needed;   // minimum version needed to extract
  mz_uint16 m_bit_flag;         // general purpose bit flags
  mz_uint16 m_method;           // compression method (ZIP spec: 0=stored, 8=deflated)
#ifndef MINIZ_NO_TIME
  time_t m_time;                // last modified time
#endif
  mz_uint32 m_crc32;            // CRC-32 of the uncompressed data
  mz_uint64 m_comp_size;        // compressed size, in bytes
  mz_uint64 m_uncomp_size;      // uncompressed size, in bytes
  mz_uint16 m_internal_attr;    // internal attribute bits
  mz_uint32 m_external_attr;    // external attribute bits (host-dependent)
  mz_uint64 m_local_header_ofs; // byte offset of the entry's local header
  mz_uint32 m_comment_size;     // length of the comment, in bytes
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; // entry filename
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; // entry comment
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
// Current operating mode of an mz_zip_archive.
typedef enum {
  MZ_ZIP_MODE_INVALID = 0,                   // not initialized
  MZ_ZIP_MODE_READING = 1,                   // opened via an mz_zip_reader_init* function
  MZ_ZIP_MODE_WRITING = 2,                   // opened via an mz_zip_writer_init* function
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 // finalized; writer may only be ended
} mz_zip_mode;
// ZIP archive reader/writer state. Zero-initialize, then call one of the
// mz_zip_reader_init* / mz_zip_writer_init* functions before use.
typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;               // total archive size, in bytes
  mz_uint64 m_central_directory_file_ofs; // byte offset of the central directory
  mz_uint m_total_files;                  // number of files in the archive
  mz_zip_mode m_zip_mode;                 // current mode (see mz_zip_mode)
  mz_uint m_file_offset_alignment; // alignment of file data when writing
                                   // (0 = no alignment) — TODO confirm writer-only
  mz_alloc_func m_pAlloc;     // heap alloc callback (NULL = use default)
  mz_free_func m_pFree;       // heap free callback
  mz_realloc_func m_pRealloc; // heap realloc callback
  void *m_pAlloc_opaque;      // user pointer passed to the heap callbacks
  mz_file_read_func m_pRead;   // archive read callback
  mz_file_write_func m_pWrite; // archive write callback
  void *m_pIO_opaque;          // user pointer passed to the I/O callbacks
  mz_zip_internal_state *m_pState; // opaque internal state
} mz_zip_archive;
// Flags accepted by the mz_zip_reader_* functions (OR'd into mz_uint flags).
typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,  // filename comparisons are case sensitive
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,     // compare filenames only, ignoring any path
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, // operate on the raw compressed data
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 // keep central dir unsorted
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forwardslash with empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
// tinfl_decompress() flags (each is documented in detail above).
enum {
  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,             // input is a zlib stream (header + adler32)
  TINFL_FLAG_HAS_MORE_INPUT = 2,                // more input exists beyond this buffer
  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, // output buffer holds the whole stream
  TINFL_FLAG_COMPUTE_ADLER32 = 8                // force adler-32 of the decompressed bytes
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status codes for tinfl_decompress().
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,        // invalid parameters
  TINFL_STATUS_ADLER32_MISMATCH = -2, // stream finished but the adler-32 check failed
  TINFL_STATUS_FAILED = -1,           // decompression failed (corrupt stream)
  TINFL_STATUS_DONE = 0,              // decompression completed successfully
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,  // supply more input bytes and call again
  TINFL_STATUS_HAS_MORE_OUTPUT = 2    // make room in the output buffer and call again
} tinfl_status;
// Initializes the decompressor to its initial state.
// NOTE(review): only m_state is cleared here; presumably the coroutine in
// tinfl_decompress() (re)builds all other fields — confirm against that code.
#define tinfl_init(r) \
  do { \
    (r)->m_state = 0; \
  } \
  MZ_MACRO_END
// Returns the adler-32 of the decompressed data computed so far.
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
// Internal decompressor constants (sizes follow the DEFLATE format, RFC 1951).
enum {
  TINFL_MAX_HUFF_TABLES = 3,      // literal/length, distance, and code-length tables
  TINFL_MAX_HUFF_SYMBOLS_0 = 288, // max symbols in the literal/length table
  TINFL_MAX_HUFF_SYMBOLS_1 = 32,  // max symbols in the distance table
  TINFL_MAX_HUFF_SYMBOLS_2 = 19,  // max symbols in the code-length table
  TINFL_FAST_LOOKUP_BITS = 10,    // bits resolved by the direct lookup table
  TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
// Huffman decoding table: a direct lookup array for short codes plus a tree
// for codes longer than TINFL_FAST_LOOKUP_BITS.
typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; // code length per symbol
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],     // fast direct lookup
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];       // fallback decode tree
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Full decompressor state. Treat as opaque; reset with tinfl_init().
struct tinfl_decompressor_tag {
  // Coroutine resume point plus assorted per-stream counters and header bytes.
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
      m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;        // bit buffer (32 or 64 bits wide, see above)
  size_t m_dist_from_out_buf_start; // output position tracking for wrap-around buffers
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; // Huffman decode tables
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
// Dictionary probe counts (stored in the low 12 bits of the tdefl flags).
enum {
  TDEFL_HUFFMAN_ONLY = 0,         // no string matching at all
  TDEFL_DEFAULT_MAX_PROBES = 128, // default probes per dictionary search
  TDEFL_MAX_PROBES_MASK = 0xFFF   // mask extracting the probe count from flags
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
// tdefl compression flags (each is documented in detail above).
enum {
  TDEFL_WRITE_ZLIB_HEADER = 0x01000,             // emit zlib header + trailing adler-32
  TDEFL_COMPUTE_ADLER32 = 0x02000,               // always compute adler-32 of the input
  TDEFL_GREEDY_PARSING_FLAG = 0x04000,           // faster greedy parsing (vs. lazy)
  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, // fastest init; output may vary per run
  TDEFL_RLE_MATCHES = 0x10000,                   // only look for distance-1 matches
  TDEFL_FILTER_MATCHES = 0x20000,                // discard matches <= 5 chars
  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,       // disable optimized Huffman tables
  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000           // only raw (uncompressed) blocks
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
  // Huffman table limits: deflate uses 288 literal/length symbols, 32
  // distance symbols and 19 code-length symbols (RFC 1951).
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  // 32KB LZ77 sliding window, the maximum permitted by the deflate format.
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
// Reduced-memory configuration: smaller LZ code buffer (24KB vs 64KB) and a
// 12-bit (4K-entry) match-finder hash table instead of 15-bit (32K-entry).
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
// Default configuration. TDEFL_OUT_BUF_SIZE is sized at 1.3x the LZ code
// buffer so a worst-case static-Huffman block always fits (see note above).
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
// Return status of the low-level tdefl API. Negative values are errors;
// TDEFL_STATUS_DONE means the stream has been fully flushed and finished.
// (Fixed: removed the trailing comma after the last enumerator, which is a
// constraint violation in C89/C++98 and inconsistent with every other enum
// in this file.)
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
// Note the deliberate gap at value 1 (MZ_PARTIAL_FLUSH): the mz_* layer maps
// partial flush to sync flush before calling tdefl.
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func; // optional output callback
  void *m_pPut_buf_user;                  // user pointer passed to the callback
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status; // returned by tdefl_get_prev_return_status()
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  // Sliding window, padded by MAX_MATCH_LEN-1 bytes so a match can run past
  // the nominal window end without wrapping logic on every byte.
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; // symbol frequencies
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; // Huffman codes
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; // code lengths
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  // NOTE(review): m_hash/m_next appear to be the match-finder hash heads and
  // chain links — confirm against the tdefl implementation (not in view).
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE]; // staging buffer for compressed output
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output will be supplied to the
// specified callback. In this case, the user should call the
// tdefl_compress_buffer() API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
// Compile-time size checks: the array size evaluates to -1 (a compile error)
// if the fixed-width typedefs don't have the expected sizes.
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
#include <string.h>
#include <assert.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
// With MINIZ_NO_MALLOC, heap allocation is compiled out entirely:
// allocations always fail (NULL) and frees are no-ops.
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void) x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
// Classic double-evaluation macros — don't pass expressions with side
// effects to MZ_MAX/MZ_MIN.
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Fast path: direct unaligned little-endian loads.
// NOTE(review): these pointer casts are technically UB under strict
// aliasing/alignment rules; this path is opt-in via
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES.
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
// Portable path: assemble the value byte-by-byte (endian-independent).
#define MZ_READ_LE16(p) \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
// Compiler-specific "always inline" hint; falls back to plain inline.
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  // Standard zlib Adler-32: `lo` is the running byte sum, `hi` the sum of
  // the `lo` values. Both are reduced mod 65521 once per chunk; 5552 is the
  // largest chunk size for which the 32-bit accumulators cannot overflow.
  mz_uint32 lo = (mz_uint32)(adler & 0xffff);
  mz_uint32 hi = (mz_uint32)(adler >> 16);
  size_t chunk = buf_len % 5552;
  if (!ptr)
    return MZ_ADLER32_INIT; // NULL input requests the initial seed value
  while (buf_len) {
    size_t i = 0;
    // Unrolled by 8 for speed; leftovers handled by the loop below.
    for (; i + 7 < chunk; i += 8, ptr += 8) {
      lo += ptr[0]; hi += lo;
      lo += ptr[1]; hi += lo;
      lo += ptr[2]; hi += lo;
      lo += ptr[3]; hi += lo;
      lo += ptr[4]; hi += lo;
      lo += ptr[5]; hi += lo;
      lo += ptr[6]; hi += lo;
      lo += ptr[7]; hi += lo;
    }
    for (; i < chunk; ++i) {
      lo += *ptr++;
      hi += lo;
    }
    lo %= 65521U;
    hi %= 65521U;
    buf_len -= chunk;
    chunk = 5552; // all chunks after the first (partial) one are full-sized
  }
  return (hi << 16) + lo;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  // Karl Malbrain's compact nibble-at-a-time CRC-32 (polynomial 0xEDB88320):
  // a 16-entry table traded against the usual 256-entry one for cache size.
  static const mz_uint32 s_tab[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac,
      0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
      0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 state;
  if (!ptr)
    return MZ_CRC32_INIT; // NULL input requests the initial seed value
  state = ~(mz_uint32)crc;
  while (buf_len--) {
    mz_uint8 byte = *ptr++;
    // Fold in the low nibble, then the high nibble.
    state = (state >> 4) ^ s_tab[(state ^ byte) & 0xF];
    state = (state >> 4) ^ s_tab[(state ^ (byte >> 4)) & 0xF];
  }
  return ~state;
}
// Frees a heap block returned by the high-level miniz helpers.
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
// Default zalloc implementation (calloc-style signature, malloc semantics —
// the returned block is NOT zeroed).
// Fixed: reject item counts whose total byte size would overflow size_t;
// previously `items * size` could wrap and silently allocate a block far
// smaller than the caller expects.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  if (size && (items > ((size_t)-1) / size))
    return NULL;
  return MZ_MALLOC(items * size);
}
// Default zfree implementation: forwards to MZ_FREE; opaque is unused.
static void def_free_func(void *opaque, void *address) {
  (void)opaque;
  (void)address;
  MZ_FREE(address);
}
// Default zrealloc implementation.
// Fixed: reject item counts whose total byte size would overflow size_t
// (same wrap-around hazard as def_alloc_func). Note realloc semantics: on
// failure the original block at `address` is left untouched.
static void *def_realloc_func(void *opaque, void *address, size_t items,
                              size_t size) {
  (void)opaque, (void)address, (void)items, (void)size;
  if (size && (items > ((size_t)-1) / size))
    return NULL;
  return MZ_REALLOC(address, items * size);
}
// Returns the miniz version string (MZ_VERSION).
const char *mz_version(void) { return MZ_VERSION; }
// zlib-compatible deflateInit(): MZ_DEFLATED method, default window bits,
// mem_level 9 (range-checked by mz_deflateInit2 for API parity), default
// strategy.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}
// zlib-compatible deflateInit2(). Restrictions vs. zlib: method must be
// MZ_DEFLATED and window_bits must be +/-MZ_DEFAULT_WINDOW_BITS (positive =
// zlib wrapper, negative = raw deflate). mem_level is range-checked ([1,9])
// for API compatibility but is not otherwise used here.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream)
    return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  // Reset the public stream bookkeeping before allocating any state.
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Install the default allocators if the caller didn't supply any.
  if (!pStream->zalloc)
    pStream->zalloc = def_alloc_func;
  if (!pStream->zfree)
    pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream); // frees pComp and clears pStream->state
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}
// Resets an initialized compressor for a new stream, preserving the flags
// it was created with.
int mz_deflateReset(mz_streamp pStream) {
  tdefl_compressor *pComp;
  if (!pStream || !pStream->state || !pStream->zalloc || !pStream->zfree)
    return MZ_STREAM_ERROR;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pComp = (tdefl_compressor *)pStream->state;
  tdefl_init(pComp, NULL, NULL, pComp->m_flags);
  return MZ_OK;
}
// zlib-compatible deflate(): compresses from next_in to next_out until the
// input is exhausted or the output buffer fills. Returns MZ_OK,
// MZ_STREAM_END (compression finished), MZ_BUF_ERROR (no forward progress
// possible) or MZ_STREAM_ERROR (bad arguments/state).
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out)
    return MZ_BUF_ERROR;
  // tdefl has no partial-flush mode; treat it as a sync flush.
  if (flush == MZ_PARTIAL_FLUSH)
    flush = MZ_SYNC_FLUSH;
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  // Snapshot the totals so we can detect lack of forward progress below.
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    // tdefl_compress() updated in_bytes/out_bytes to what it actually
    // consumed/produced; mirror that into the zlib-style stream fields.
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}
// Frees the compressor state allocated by mz_deflateInit2(). Safe to call
// repeatedly: the state pointer is cleared after being freed.
int mz_deflateEnd(mz_streamp pStream) {
  struct mz_internal_state *st;
  if (pStream == NULL)
    return MZ_STREAM_ERROR;
  st = pStream->state;
  if (st != NULL) {
    pStream->zfree(pStream->opaque, st);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// Worst-case compressed size for source_len input bytes.
// Deliberately over-conservative: a true tight bound is tricky to compute
// given the way tdefl's blocking works, so take the larger of a 10%-growth
// estimate and the stored-block worst case (5 bytes of overhead per
// 31KB raw block, plus slack).
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  mz_ulong growth_bound = 128 + (source_len * 110) / 100;
  mz_ulong stored_bound =
      128 + source_len + ((source_len / (31 * 1024)) + 1) * 5;
  (void)pStream;
  return MZ_MAX(growth_bound, stored_bound);
}
// Single-call compression: deflates pSource into pDest at the given level.
// On entry *pDest_len is the output capacity; on success it receives the
// compressed size. Returns MZ_OK or an MZ_* error code.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  mz_stream strm;
  int ret;
  memset(&strm, 0, sizeof(strm));
  // mz_ulong may be 64 bits, but the streaming counters are 32-bit.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU)
    return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  ret = mz_deflateInit(&strm, level);
  if (ret != MZ_OK)
    return ret;
  ret = mz_deflate(&strm, MZ_FINISH);
  if (ret != MZ_STREAM_END) {
    // One-shot MZ_FINISH must end the stream; MZ_OK here means the output
    // buffer was too small.
    mz_deflateEnd(&strm);
    return (ret == MZ_OK) ? MZ_BUF_ERROR : ret;
  }
  *pDest_len = strm.total_out;
  return mz_deflateEnd(&strm);
}
// Single-call compression at the default level; thin wrapper over
// mz_compress2().
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}
// Worst-case compressed size for source_len bytes (see mz_deflateBound).
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}
// Per-stream state for the zlib-style inflate API, allocated via zalloc.
typedef struct {
  tinfl_decompressor m_decomp; // low-level tinfl coroutine state
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits; // > 0: parse zlib header; otherwise raw deflate
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; // output dictionary / sliding window
  tinfl_status m_last_status; // most recent tinfl_decompress() result
} inflate_state;
// zlib-compatible inflateInit2(). window_bits must be
// +/-MZ_DEFAULT_WINDOW_BITS (positive = expect a zlib header, negative =
// raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pState;
  if (!pStream)
    return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  // Reset the public stream bookkeeping.
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Install the default allocators if the caller didn't supply any.
  if (!pStream->zalloc)
    pStream->zalloc = def_alloc_func;
  if (!pStream->zfree)
    pStream->zfree = def_free_func;
  pState = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                            sizeof(inflate_state));
  if (!pState)
    return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pState;
  tinfl_init(&pState->m_decomp);
  pState->m_dict_ofs = 0;
  pState->m_dict_avail = 0;
  pState->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pState->m_first_call = 1;
  pState->m_has_flushed = 0;
  pState->m_window_bits = window_bits;
  return MZ_OK;
}
// zlib-compatible inflateInit(): inflateInit2() with the default (zlib
// wrapper) window bits.
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
// zlib-compatible inflate(). Two modes: a fast non-wrapping path when
// MZ_FINISH is passed on the very first call (whole stream in one shot), and
// a windowed path that decompresses through the 32KB dictionary otherwise.
// Returns MZ_OK, MZ_STREAM_END, MZ_BUF_ERROR, MZ_DATA_ERROR or
// MZ_STREAM_ERROR.
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state))
    return MZ_STREAM_ERROR;
  // tinfl has no partial-flush mode; treat it as a sync flush.
  if (flush == MZ_PARTIAL_FLUSH)
    flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0)
    decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0)
    return MZ_DATA_ERROR; // stream previously failed; stay failed
  if (pState->m_has_flushed && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR; // once MZ_FINISH is used, only MZ_FINISH is valid
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers
    // are large enough to hold the entire compressed/decompressed file, so
    // decompress directly into the caller's buffer (no dictionary copy).
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      // Anything short of DONE means the one-shot buffers were too small.
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH)
    decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  if (pState->m_dict_avail) {
    // Drain bytes already decompressed into the dictionary on a prior call
    // before running the decompressor again.
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    // Decompress into the circular dictionary, then copy out what fits.
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}
// Frees the inflate_state allocated by mz_inflateInit2(). Safe to call
// repeatedly: the state pointer is cleared after being freed.
int mz_inflateEnd(mz_streamp pStream) {
  if (pStream == NULL)
    return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// Single-call decompression. On entry *pDest_len is the output capacity; on
// success it receives the number of bytes written. Returns MZ_OK or an MZ_*
// error code.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU)
    return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_inflateInit(&stream);
  if (status != MZ_OK)
    return status;
  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    // BUF_ERROR with all input consumed means the stream was truncated.
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
                                                            : status;
  }
  *pDest_len = stream.total_out;
  return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err)
return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
// The decompressor is written as a coroutine: TINFL_CR_RETURN stores a state
// index in r->m_state and returns to the caller; on the next call
// TINFL_CR_BEGIN's switch jumps to the matching case label, resuming
// execution exactly where it left off (Duff's-device style).
#define TINFL_CR_BEGIN \
  switch (r->m_state) { \
  case 0:
#define TINFL_CR_RETURN(state_index, result) \
  do { \
    status = result; \
    r->m_state = state_index; \
    goto common_exit; \
  case state_index: \
    ; \
  } \
  MZ_MACRO_END
// Permanently parks the coroutine on the given result (used for fatal
// failures: every subsequent call returns the same status).
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
  do { \
    for (;;) { \
      TINFL_CR_RETURN(state_index, result); \
    } \
  } \
  MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
// Reads one input byte into c. If the input buffer is exhausted and the
// caller promised more input, yields TINFL_STATUS_NEEDS_MORE_INPUT via the
// coroutine; otherwise pads with 0 (see the TODO above).
#define TINFL_GET_BYTE(state_index, c) \
  do { \
    if (pIn_buf_cur >= pIn_buf_end) { \
      for (;;) { \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
          if (pIn_buf_cur < pIn_buf_end) { \
            c = *pIn_buf_cur++; \
            break; \
          } \
        } else { \
          c = 0; \
          break; \
        } \
      } \
    } else \
      c = *pIn_buf_cur++; \
  } \
  MZ_MACRO_END
// Refills bit_buf one byte at a time until at least n bits are available.
#define TINFL_NEED_BITS(state_index, n) \
  do { \
    mz_uint c; \
    TINFL_GET_BYTE(state_index, c); \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
    num_bits += 8; \
  } while (num_bits < (mz_uint)(n))
// Discards the low n bits of bit_buf (refilling first if needed).
#define TINFL_SKIP_BITS(state_index, n) \
  do { \
    if (num_bits < (mz_uint)(n)) { \
      TINFL_NEED_BITS(state_index, n); \
    } \
    bit_buf >>= (n); \
    num_bits -= (n); \
  } \
  MZ_MACRO_END
// Extracts the low n bits of bit_buf into b and consumes them.
#define TINFL_GET_BITS(state_index, b, n) \
  do { \
    if (num_bits < (mz_uint)(n)) { \
      TINFL_NEED_BITS(state_index, n); \
    } \
    b = bit_buf & ((1 << (n)) - 1); \
    bit_buf >>= (n); \
    num_bits -= (n); \
  } \
  MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
// NOTE: unlike the other macros this one is not wrapped in MZ_MACRO_END —
// the do/while condition itself (num_bits < 15) is the refill loop, and it
// expands to a complete statement ending in ';'.
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
  do { \
    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
    if (temp >= 0) { \
      code_len = temp >> 9; \
      if ((code_len) && (num_bits >= code_len)) \
        break; \
    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
      code_len = TINFL_FAST_LOOKUP_BITS; \
      do { \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
      } while ((temp < 0) && (num_bits >= (code_len + 1))); \
      if (temp >= 0) \
        break; \
    } \
    TINFL_GET_BYTE(state_index, c); \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
    num_bits += 8; \
  } while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
// Decodes one Huffman symbol from pHuff into sym: fast path refills the bit
// buffer two bytes at a time; within 2 bytes of the input's end it falls
// back to TINFL_HUFF_BITBUF_FILL (see the comment above). Lookup-table hits
// encode (code_len << 9) | symbol; negative entries index into the binary
// tree for codes longer than TINFL_FAST_LOOKUP_BITS.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
  do { \
    int temp; \
    mz_uint code_len, c; \
    if (num_bits < 15) { \
      if ((pIn_buf_end - pIn_buf_cur) < 2) { \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
      } else { \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
        pIn_buf_cur += 2; \
        num_bits += 16; \
      } \
    } \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0) \
      code_len = temp >> 9, temp &= 511; \
    else { \
      code_len = TINFL_FAST_LOOKUP_BITS; \
      do { \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
      } while (temp < 0); \
    } \
    sym = temp; \
    bit_buf >>= code_len; \
    num_bits -= code_len; \
  } \
  MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next,
*const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next,
*const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i)
*p++ = 8;
for (; i <= 255; ++i)
*p++ = 9;
for (; i <= 279; ++i)
*p++ = 7;
for (; i <= 287; ++i)
*p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size)
continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256)
break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256)
break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256)
break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1)
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1)
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i)
s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
// Decompresses an in-memory block to a heap-allocated buffer, growing the
// buffer geometrically as needed. On success returns a pointer to the
// decompressed data (caller must MZ_FREE() it) and stores its length in
// *pOut_len; on any failure returns NULL with *pOut_len set to 0.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    // The entire input is available up front, so NEEDS_MORE_INPUT means the
    // stream is truncated or corrupt — treat it as a hard failure.
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE)
      break;
    // Grow the output buffer. Guard the doubling against size_t overflow:
    // a wrapped (tiny) capacity here would corrupt the size bookkeeping.
    if (out_buf_capacity > (((size_t)-1) / 2)) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128)
      new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
// Decompresses a memory block, delivering output through pPut_buf_func from
// an internal TINFL_LZ_DICT_SIZE circular dictionary. Returns 1 on success,
// 0 on failure (or TINFL_STATUS_FAILED if the dictionary allocation fails).
// *pIn_buf_size is updated to the number of input bytes consumed.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  int finished = 0;
  tinfl_decompressor decomp;
  size_t in_buf_ofs = 0, dict_ofs = 0;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  if (!pDict)
    return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  while (!finished) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs;
    size_t dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    // The dictionary wraps, so the non-wrapping flag must be cleared; input
    // is complete, so the more-input flag is cleared as well.
    tinfl_status status =
        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
                         &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
                         (flags &
                          ~(TINFL_FLAG_HAS_MORE_INPUT |
                            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    // Hand new output to the callback first; a zero return aborts with
    // result still 0 (failure).
    if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs,
                                             (int)dst_buf_size,
                                             pPut_buf_user))) {
      finished = 1;
    } else if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      finished = 1;
    } else {
      dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
    }
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}
// ------------------- Low-level Compression (independent from all decompression
// APIs)
// Purposely making these tables static for faster init and thread safety.
// Maps the LZ code buffer's match-length byte (0..255) to the corresponding
// deflate length symbol (257..285).
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
    268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
    272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
    274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
    276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
    280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    285};
// Number of extra bits to emit after the length symbol, indexed by the same
// match-length byte as s_tdefl_len_sym.
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
// Deflate distance symbol for small recorded distances; indexed by
// (recorded distance & 511) — used when the recorded distance is < 512.
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
// Extra-bit counts paired with s_tdefl_small_dist_sym (same indexing).
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
// Deflate distance symbol for large recorded distances; indexed by
// (recorded distance >> 8) — used when the recorded distance is >= 512.
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
    24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
// Extra-bit counts paired with s_tdefl_large_dist_sym (same indexing).
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
// m_key holds the symbol's frequency (later reused by the code-length pass);
// m_sym_index is the symbol's position in its Huffman table.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
// Stable LSB-first radix sort of num_syms entries by the 16-bit m_key field.
// Uses pSyms0/pSyms1 as ping-pong buffers and returns whichever one holds the
// sorted result.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 hist[256 * 2];
  mz_uint32 pass, pass_shift, i, total_passes = 2;
  tdefl_sym_freq *pSrc = pSyms0, *pDst = pSyms1;
  MZ_CLEAR_OBJ(hist);
  // Build the low-byte and high-byte histograms in one sweep.
  for (i = 0; i < num_syms; i++) {
    mz_uint key = pSyms0[i].m_key;
    hist[key & 0xFF]++;
    hist[256 + ((key >> 8) & 0xFF)]++;
  }
  // Skip the high-byte pass entirely when every key fits in the low byte.
  while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
    total_passes--;
  for (pass = 0, pass_shift = 0; pass < total_passes;
       pass++, pass_shift += 8) {
    const mz_uint32 *pHist = &hist[pass << 8];
    mz_uint offsets[256], cur_ofs = 0;
    tdefl_sym_freq *pTmp;
    // Prefix-sum the histogram into starting offsets.
    for (i = 0; i < 256; i++) {
      offsets[i] = cur_ofs;
      cur_ofs += pHist[i];
    }
    // Scatter into the destination buffer, preserving relative order.
    for (i = 0; i < num_syms; i++)
      pDst[offsets[(pSrc[i].m_key >> pass_shift) & 0xFF]++] = pSrc[i];
    pTmp = pSrc;
    pSrc = pDst;
    pDst = pTmp;
  }
  return pSrc;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// In-place computation of minimum-redundancy (Huffman) code lengths.
// On entry A[0..n-1].m_key holds symbol frequencies sorted in increasing
// order; on exit A[i].m_key holds the code length for the i-th symbol.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  // Trivial cases: no symbols, or a single symbol with a 1-bit code.
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  // Phase 1: build the Huffman tree in place; each processed slot's m_key
  // is overwritten with the index of that node's parent.
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    // Pick the first of the two smallest remaining items (leaf or internal).
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = A[leaf++].m_key;
    // Pick the second item and merge it into the new internal node.
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  // Phase 2: convert parent links into internal-node depths.
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  // Phase 3: distribute leaf depths (code lengths) from the internal depths.
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size. Used to size the
// per-length histogram arrays before lengths are clamped to the deflate
// limit by tdefl_huffman_enforce_max_code_size().
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
// Clamps a histogram of Huffman code lengths (pNum_codes[len] = number of
// codes of that length) so that no code exceeds max_code_size, then repairs
// the Kraft sum so the lengths still describe a complete prefix code.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 kraft_total = 0;
  if (code_list_len <= 1)
    return;
  // Fold every over-long code into the longest permitted length.
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  // Compute the Kraft sum scaled so a complete code totals 2^max_code_size.
  for (i = max_code_size; i > 0; i--)
    kraft_total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  // While over-subscribed: demote one max-length code and promote a shorter
  // code into two longer ones, reducing the total by one each iteration.
  while (kraft_total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--) {
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    }
    kraft_total--;
  }
}
// Builds the canonical Huffman code table for table_num (0 = literal/length,
// 1 = distance, 2 = code lengths). If static_table is set, the code sizes
// already stored in d->m_huff_code_sizes[table_num] are used verbatim;
// otherwise optimal, length-limited sizes are derived from the symbol
// frequencies in d->m_huff_count[table_num]. Either way, the bit-reversed
// canonical codes are written to d->m_huff_codes[table_num].
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Static table: just histogram the preassigned code sizes.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Gather only the symbols that actually occur.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    // Sort by frequency, compute optimal lengths, then clamp to the limit.
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++)
      num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Hand the (possibly clamped) lengths back to the symbols: pSyms is in
    // increasing-frequency order, so the rarest symbols get the longest codes.
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Generate canonical codes from the length histogram, then store each code
  // bit-reversed (deflate transmits Huffman codes LSB first).
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0)
      continue;
    code = next_code[code_size]++;
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}
// Appends l bits (value b) to d's bit buffer, flushing complete bytes to the
// output buffer as they accumulate. If the output buffer is already full the
// bytes are dropped; callers detect this by checking m_pOutput_buf against
// m_pOutput_buf_end afterwards.
#define TDEFL_PUT_BITS(b, l)                                 \
  do {                                                       \
    mz_uint bits = b;                                        \
    mz_uint len = l;                                         \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                   \
    d->m_bit_buffer |= (bits << d->m_bits_in);               \
    d->m_bits_in += len;                                     \
    while (d->m_bits_in >= 8) {                              \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)           \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);   \
      d->m_bit_buffer >>= 8;                                 \
      d->m_bits_in -= 8;                                     \
    }                                                        \
  }                                                          \
  MZ_MACRO_END
// Flushes a pending run of repeated non-zero code sizes while RLE-packing the
// code-size list: runs shorter than 3 are emitted literally, longer runs use
// symbol 16 ("copy previous") followed by a repeat count. Also accumulates
// the code-length table's symbol frequencies in d->m_huff_count[2].
#define TDEFL_RLE_PREV_CODE_SIZE()                                    \
  {                                                                   \
    if (rle_repeat_count) {                                           \
      if (rle_repeat_count < 3) {                                     \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(             \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);   \
        while (rle_repeat_count--)                                    \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
      } else {                                                        \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 16;              \
        packed_code_sizes[num_packed_code_sizes++] =                  \
            (mz_uint8)(rle_repeat_count - 3);                         \
      }                                                               \
      rle_repeat_count = 0;                                           \
    }                                                                 \
  }
// Flushes a pending run of zero code sizes: runs shorter than 3 are emitted
// literally, runs of 3..10 use symbol 17, longer runs use symbol 18, each
// followed by a run-length count. Also accumulates the code-length table's
// symbol frequencies in d->m_huff_count[2].
#define TDEFL_RLE_ZERO_CODE_SIZE()                                        \
  {                                                                       \
    if (rle_z_count) {                                                    \
      if (rle_z_count < 3) {                                              \
        d->m_huff_count[2][0] =                                           \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);             \
        while (rle_z_count--)                                             \
          packed_code_sizes[num_packed_code_sizes++] = 0;                 \
      } else if (rle_z_count <= 10) {                                     \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 17;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_z_count - 3);                                  \
      } else {                                                            \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 18;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_z_count - 11);                                 \
      }                                                                   \
      rle_z_count = 0;                                                    \
    }                                                                     \
  }
// Transmission order of the code-length code sizes in a dynamic block header
// (the fixed permutation defined by the deflate format, RFC 1951 §3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
// Emits a dynamic-Huffman deflate block header: optimizes the literal/length
// and distance tables from the accumulated frequencies, RLE-packs the
// combined code-size list (symbols 16/17/18 per RFC 1951), optimizes the
// code-length table, and writes the whole header with TDEFL_PUT_BITS.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) is always emitted exactly once.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing zero-length codes, keeping the format minimums of 257
  // literal/length codes and 1 distance code.
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1])
      break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1])
      break;
  // Concatenate the two code-size lists, then RLE-pack them while counting
  // code-length symbol frequencies for table 2.
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {  // longest run symbol 18 can express
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {  // longest run symbol 16 allows
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still open at the end of the list.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  // Block header: BTYPE=2 (dynamic), then HLIT, HDIST, HCLEN fields.
  TDEFL_PUT_BITS(2, 2);
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  // Code-length code sizes, in the swizzled transmission order.
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Finally the RLE-packed code sizes, with 2/3/7 extra bits following
  // symbols 16/17/18 respectively.
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}
// Emits a static-Huffman deflate block header: installs the fixed code sizes
// (8/9/7/8-bit literal/length runs, 5-bit distances), derives the canonical
// codes from them, and writes BTYPE=1.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  memset(p, 8, 144);       /* literals 0..143: 8 bits */
  memset(p + 144, 9, 112); /* literals 144..255: 9 bits */
  memset(p + 256, 7, 24);  /* symbols 256..279: 7 bits */
  memset(p + 280, 8, 8);   /* symbols 280..287: 8 bits */
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  TDEFL_PUT_BITS(1, 2);
}
// mz_bitmasks[n] has the low n bits set; used to mask off extra-bits fields.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
// Fast-path LZ code encoder for 64-bit little-endian targets that allow
// unaligned stores: bits are accumulated in a 64-bit register and flushed
// with one unaligned 8-byte store per iteration. Encodes the buffered LZ
// codes (literals and length/distance matches) with the current Huffman
// tables and appends the end-of-block symbol. Returns MZ_FALSE if the output
// buffer overflowed.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;
// Unchecked bit append into the local 64-bit buffer; whole bytes are flushed
// at the bottom of each loop iteration.
#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;  // reload the per-8-code flag byte
    if (flags & 1) {
      // Match record: 1 length byte followed by a 16-bit distance.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal. Opportunistically emit up to two more literals while the
      // next flag bits are also zero, amortizing the flush below.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end)
      return MZ_FALSE;
    // Flush all complete bytes with a single unaligned 64-bit store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  // Migrate the few remaining bits into the compressor's normal bit buffer.
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  // End-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable LZ code encoder: emits the buffered LZ codes (literals and
// length/distance matches) with the current Huffman tables via
// TDEFL_PUT_BITS, then appends the end-of-block symbol. Returns MZ_FALSE if
// the output buffer overflowed.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;  // reload the per-8-code flag byte
    if (flags & 1) {
      // Match record: 1 length byte + 2 distance bytes (little-endian).
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      // Small distances index the 512-entry table directly; larger ones use
      // the high byte against the large-distance tables.
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  // End-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
// Emits one compressed deflate block: writes the block's code tables (static
// or dynamic Huffman), then encodes the buffered LZ codes. Returns the
// encoder's success flag.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block) {
    tdefl_start_static_block(d);
  } else {
    tdefl_start_dynamic_block(d);
  }
  return tdefl_compress_lz_codes(d);
}
// Finishes the current block: emits it compressed (static or dynamic), or
// falls back to a stored (raw) block when compression would expand the data.
// Writes the zlib header/adler32 footer when TDEFL_WRITE_ZLIB_HEADER is set,
// resets the per-block state, and routes the output to the user's callback,
// directly into the user's buffer, or through the internal flush buffer.
// Returns the number of bytes still pending in the internal flush buffer, or
// TDEFL_STATUS_PUT_BUF_FAILED if the callback rejects the data.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n, use_raw_block =
             ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
             (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  // Write straight into the caller's buffer when there is room for a full
  // block; otherwise stage through the internal output buffer.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Normalize the final (possibly partial) LZ flag byte.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    // zlib stream header (0x78 0x01) before the very first block.
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  // BFINAL bit.
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
  // Remember the output position so the block can be re-emitted raw if
  // compression turns out to expand it.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // Stored block: BTYPE=0, pad to a byte boundary, then LEN and ~LEN.
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    // The raw bytes still live in the dictionary window.
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Final flush: byte-align, then append the adler32 (MSB first) when
      // writing a zlib stream.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Sync flush: an empty stored block forces a byte-aligned boundary.
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset per-block state for the next block.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Deliver whatever was produced during this call.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      // Copy what fits into the caller's buffer; the remainder stays in the
      // internal buffer and is drained on subsequent calls.
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
// Reads a 16-bit word from a possibly unaligned address; only compiled in
// when the target is known to tolerate unaligned loads.
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Searches the hash chain (d->m_next) for the longest match to the bytes at
// lookahead_pos. On entry *pMatch_len is the length to beat; *pMatch_dist and
// *pMatch_len are updated only when a strictly longer match is found. This
// variant compares two bytes at a time via unaligned 16-bit loads.
static MZ_FORCEINLINE void
tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
                 mz_uint max_match_len, mz_uint *pMatch_dist,
                 mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  // Fewer probes are spent once the current best match is already "long".
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01 = word straddling the end of the current best match; a candidate must
  // match it before the full comparison is worth running.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len)
    return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0)
        return;
#define TDEFL_PROBE                                                            \
  next_probe_pos = d->m_next[probe_pos];                                       \
  if ((!next_probe_pos) ||                                                     \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))       \
    return;                                                                    \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                        \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist)
      break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    // Quick rejection on the first word of the candidate.
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01)
      continue;
    p = s;
    probe_len = 32;
    // Compare 8 bytes (4 word compares) per iteration, up to 32 iterations.
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Compare loop ran to its limit with no mismatch: maximal-length match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      // New best match: refresh the tail word used for quick rejection.
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Byte-at-a-time tdefl_find_match() used when unaligned 16-bit loads aren't
// permitted on the target. Same contract as the unaligned variant: walks the
// d->m_next hash chain and updates *pMatch_dist/*pMatch_len only when a match
// strictly longer than the incoming *pMatch_len is found.
static MZ_FORCEINLINE void
tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
                 mz_uint max_match_len, mz_uint *pMatch_dist,
                 mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // c0/c1: the two bytes around the end of the current best match; a
  // candidate must agree on both before a full compare is attempted.
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len)
    return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0)
        return;
#define TDEFL_PROBE                                                            \
  next_probe_pos = d->m_next[probe_pos];                                       \
  if ((!next_probe_pos) ||                                                     \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))       \
    return;                                                                    \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                        \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                              \
      (d->m_dict[probe_pos + match_len - 1] == c1))                            \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist)
      break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++)
        break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len)
        return;
      // New best match: refresh the two quick-rejection bytes.
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio. Returns MZ_FALSE only when a block flush
  // reports an error; MZ_TRUE otherwise (including "output buffer full").
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    // Copy as much source data as fits into the (wrapping) dictionary.
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      // Mirror the dictionary's first TDEFL_MAX_MATCH_LEN-1 bytes past its
      // end so matches can run off the end without wrap checks.
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    // Unless flushing, wait for a full lookahead window before parsing.
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      // Hash the next 3 bytes (little-endian 32-bit load, low 24 bits kept)
      // into the single-probe level-1 hash table.
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        // Extend the match 8 bytes (4 word compares) per iteration.
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        // probe_len == 0 means the compare loop hit its limit: treat as a
        // maximal match (or no match at all if the distance was 0).
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          // Too short (or a minimum-length match that's far away, which
          // codes poorly): emit a literal instead.
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          // LZ record: length byte + 16-bit little-endian distance, flag = 1.
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        // No usable probe: emit the current byte as a literal.
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      // Flush the LZ code buffer when it's nearly full.
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    // Fewer than 4 bytes remain: drain the rest as raw literals.
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  // Write the locally cached parse state back to the compressor.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  /* Append one literal byte to the LZ code buffer, shift a 0 bit ("literal")
     into the current flag byte, and bump its Huffman frequency count. */
  d->m_total_lz_bytes += 1;
  d->m_pLZ_code_buf[0] = lit;
  d->m_pLZ_code_buf += 1;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  d->m_num_flags_left -= 1;
  if (d->m_num_flags_left == 0) {
    /* Flag byte exhausted: reserve the next output byte as the new one. */
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf;
    d->m_pLZ_code_buf += 1;
  }
  d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void
tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) {
  /* Append a (length, distance) pair to the LZ code buffer: one length byte
     followed by the distance as a little-endian 16-bit value, then shift a
     1 bit ("match") into the current flag byte and update the Huffman
     frequency tables used later for the dynamic codes. */
  mz_uint32 small_sym, large_sym;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  match_dist -= 1; /* distances are stored biased by one */
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  d->m_num_flags_left -= 1;
  if (d->m_num_flags_left == 0) {
    /* Flag byte exhausted: reserve the next output byte as the new one. */
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf;
    d->m_pLZ_code_buf += 1;
  }
  small_sym = s_tdefl_small_dist_sym[match_dist & 511];
  large_sym = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ? small_sym : large_sym]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
// Full-featured greedy/lazy parse loop: maintains the hash chains consumed by
// tdefl_find_match() and also supports RLE-only and force-raw-block modes.
// Returns MZ_FALSE only when a block flush reports an error.
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      // Rolling hash seeded with the two bytes preceding the insert point.
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        // Mirror the dictionary's first TDEFL_MAX_MATCH_LEN-1 bytes past its
        // end so matches can run past the wrap point without extra masking.
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      // Cold start: fewer than 2 bytes buffered, so insert byte-by-byte and
      // only start hashing once 3 bytes are available.
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    // Unless flushing, wait for a full lookahead before parsing.
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
      break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      // RLE mode: only look for a run of the previous byte (distance 1).
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c)
            break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    // Drop matches that likely code larger than literals: minimum-length
    // matches with big distances, self-referential distances, or (in filter
    // mode) short matches.
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      // Lazy parse: a match at the previous position was deferred.
      if (cur_match_len > d->m_saved_match_len) {
        // Current position matches longer: emit the deferred byte as a
        // literal and adopt (or re-defer) the new match.
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        // The deferred match wins.
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      // Greedy mode, or a match long enough that lazy evaluation can't help.
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      // Lazy mode: defer this match one position to see if the next is
      // longer.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  /* Copy pending compressed output into the caller's buffer and report how
     much input/output has been consumed/produced so far. Returns DONE only
     once the stream is finished and nothing remains to flush. */
  if (d->m_pIn_buf_size)
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  if (d->m_pOut_buf_size) {
    size_t avail = *d->m_pOut_buf_size - d->m_out_buf_ofs;
    size_t n = MZ_MIN(avail, d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  if (d->m_finished && !d->m_output_flush_remaining)
    return TDEFL_STATUS_DONE;
  return TDEFL_STATUS_OKAY;
}
// Main streaming entry point. Consumes up to *pIn_buf_size bytes from pIn_buf
// and writes up to *pOut_buf_size bytes to pOut_buf; on return both sizes are
// updated to the amounts actually consumed/produced. When a put-buf callback
// was installed at init time, pOut_buf/pOut_buf_size must be NULL and output
// goes through the callback instead.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // Reject: callback mode mixed with caller output buffers (and vice versa),
  // calls after a previous error, more input after TDEFL_FINISH was
  // requested, and non-empty buffers passed as NULL.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  // Drain output still pending from a previous call before compressing more.
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // Fastest path: single-probe greedy parsing with no special match modes.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d))
      return d->m_prev_return_status;
  } else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d))
      return d->m_prev_return_status;
  }
  // Keep the adler-32 of the consumed input current (needed for the zlib
  // footer and for tdefl_get_adler32()).
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  // All buffered input consumed: emit the final/flush block.
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0)
      return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // A full flush resets the dictionary so the next block is independent.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
// Convenience wrapper for callback-style output; the compressor must have
// been tdefl_init()'ed with a non-NULL put-buf callback.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
// (Re)initializes a compressor. pPut_buf_func may be NULL when output will be
// delivered via the pOut_buf form of tdefl_compress(). flags combines the
// probe count (low 12 bits) with TDEFL_* option flags. Always returns
// TDEFL_STATUS_OKAY.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  // Derive the short-match/long-match probe budgets from the low 12 bits.
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  // Skipping the hash clear speeds up init, but parsing then depends on
  // leftover table contents (hence "nondeterministic").
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
    MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // The first byte of the LZ code buffer is reserved for the flag byte.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
// Returns the status of the most recent tdefl_compress() call.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}
// Returns the adler-32 of the source data consumed so far (initially 1).
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  /* Compress a whole memory block in one shot, streaming the output through
     pPut_buf_func. Returns MZ_FALSE on bad arguments, allocation failure, or
     compression failure. */
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if ((buf_len && !pBuf) || !pPut_buf_func)
    return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_FALSE;
  /* Short-circuit: compression is only attempted when init succeeded. */
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY) &&
              (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
               TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}
// Growable output sink used by the mem-to-heap/mem-to-mem helpers. When
// m_expandable is false (caller-owned buffer), writes past m_capacity fail.
typedef struct {
  size_t m_size, m_capacity; // bytes used / bytes allocated
  mz_uint8 *m_pBuf;          // output storage
  mz_bool m_expandable;      // may the buffer be grown with MZ_REALLOC?
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  /* tdefl put-buf callback: append len bytes to a tdefl_output_buffer,
     doubling its capacity (minimum 128 bytes) when growth is allowed. */
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t grown_capacity = p->m_capacity;
    mz_uint8 *pGrown_buf;
    if (!p->m_expandable)
      return MZ_FALSE;
    do {
      grown_capacity = MZ_MAX(128U, grown_capacity << 1U);
    } while (new_size > grown_capacity);
    pGrown_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, grown_capacity);
    if (!pGrown_buf)
      return MZ_FALSE;
    p->m_pBuf = pGrown_buf;
    p->m_capacity = grown_capacity;
  }
  memcpy(p->m_pBuf + p->m_size, pBuf, (size_t)len);
  p->m_size = new_size;
  return MZ_TRUE;
}
// Compresses src_buf_len bytes of pSrc_buf into a heap buffer allocated with
// the MZ_* allocator. On success returns the buffer (caller owns it and must
// free it, e.g. with mz_free/MZ_FREE) and stores the compressed size in
// *pOut_len. Returns NULL on bad arguments or compression failure.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return NULL; // fix: was `return MZ_FALSE;` (an int constant) from a
                 // void*-returning function
  *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                    tdefl_output_buffer_putter, &out_buf,
                                    flags)) {
    // fix: release any partially grown buffer instead of leaking it when
    // compression fails part-way through (MZ_FREE(NULL) is a no-op).
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  /* Compress directly into a caller-supplied fixed-size buffer (the sink is
     marked non-expandable). Returns the compressed size, or 0 on failure or
     when the output doesn't fit. */
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (pOut_buf == NULL)
    return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  return tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                      tdefl_output_buffer_putter, &out_buf,
                                      flags)
             ? out_buf.m_size
             : 0;
}
#ifndef MINIZ_NO_ZLIB_APIS
// Hash-chain probe counts for zlib-style compression levels 0..10
// (index == level; the low 12 bits of the tdefl flags).
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
                                               128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  /* Translate zlib-style (level, window_bits, strategy) parameters into a
     tdefl flags word: probe count in the low bits plus TDEFL_* options. */
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL];
  if (level <= 3)
    comp_flags |= TDEFL_GREEDY_PARSING_FLAG;
  if (window_bits > 0)
    comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level) {
    /* Level 0: store everything uncompressed; strategy is irrelevant. */
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  } else {
    switch (strategy) {
    case MZ_FILTERED:
      comp_flags |= TDEFL_FILTER_MATCHES;
      break;
    case MZ_HUFFMAN_ONLY:
      comp_flags &= ~TDEFL_MAX_PROBES_MASK; /* zero probes: no LZ matching */
      break;
    case MZ_FIXED:
      comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
      break;
    case MZ_RLE:
      comp_flags |= TDEFL_RLE_MATCHES;
      break;
    default:
      break;
    }
  }
  return comp_flags;
}
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
// Writes pImage (w x h pixels, num_chans bytes per pixel, rows tightly
// packed) as a complete PNG file into a heap buffer. Returns the buffer
// (caller frees, e.g. with mz_free/MZ_FREE) and stores its size in *pLen_out,
// or returns NULL on failure. level is the compression level passed through
// to tdefl; flip writes the image bottom-up.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip) {
  // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
  // defined.
  static const mz_uint s_tdefl_png_num_probes[11] = {
      0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
  tdefl_compressor *pComp =
      (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  tdefl_output_buffer out_buf;
  int i, bpl = w * num_chans, y, z;
  mz_uint32 c;
  *pLen_out = 0;
  if (!pComp)
    return NULL;
  MZ_CLEAR_OBJ(out_buf);
  out_buf.m_expandable = MZ_TRUE;
  out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
  if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
    MZ_FREE(pComp);
    return NULL;
  }
  // write dummy header (41 placeholder bytes, overwritten with the real
  // header below; note the loop leaves z == 0, which doubles as the PNG
  // per-row filter byte during compression)
  for (z = 41; z; --z)
    tdefl_output_buffer_putter(&z, 1, &out_buf);
  // compress image data
  tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf,
             s_tdefl_png_num_probes[MZ_MIN(10, level)] |
                 TDEFL_WRITE_ZLIB_HEADER);
  for (y = 0; y < h; ++y) {
    // Each scanline is preceded by a filter-type byte of 0 ("None").
    tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
    tdefl_compress_buffer(pComp,
                          (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;
  {
    // Color type per channel count: 1->gray(0), 2->gray+alpha(4),
    // 3->truecolor(2), 4->truecolor+alpha(6).
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {
        0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d,
        0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0,
        (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0,
        0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16),
        (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41,
        0x54};
    // IHDR CRC covers the chunk type plus its 13 data bytes (offsets 12..28).
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // IDAT CRC covers the chunk type ("IDAT") plus the compressed data.
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}
// Same as the _ex variant above with compression level 6 and no vertical
// flip.
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but
  // we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib
  // APIs were #defined out).
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, 6, MZ_FALSE);
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
// stdio disabled: file handles become opaque pointers that are never
// dereferenced.
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
// fopen() wrapper over MSVC's fopen_s; returns NULL on failure instead of
// reporting through an errno_t.
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
// freopen() wrapper over MSVC's freopen_s; returns NULL on failure.
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream))
    return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
// MSVC / MinGW-w64: 64-bit offsets via _ftelli64/_fseeki64.
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
// MinGW32: 64-bit offsets via ftello64/fseeko64.
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
// TinyCC: no 64-bit seek/tell variants; plain ftell/fseek (archives larger
// than 2 GB won't work here).
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && _LARGEFILE64_SOURCE
// GCC with large-file support: explicit *64 APIs.
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
// POSIX default: ftello/fseeko (64-bit wherever off_t is 64-bit).
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
// ASCII-only tolower, avoiding locale-dependent behavior in name compares.
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets (byte offsets of each
  // little-endian field within the raw 46-byte record)
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets (within the 30-byte record)
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets (within the 22-byte record)
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
// Growable heap-backed array of fixed-size elements; the element size is not
// encoded in the type, it is tracked at runtime in m_element_size.
typedef struct {
  void *m_p;                 // element storage
  size_t m_size, m_capacity; // element counts: used / allocated
  mz_uint m_element_size;    // size of one element in bytes
} mz_zip_array;
// Per-archive internal reader/writer state.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;         // raw central directory records
  mz_zip_array m_central_dir_offsets; // offset of each record in m_central_dir
  // NOTE(review): presumably indices ordered for fast filename lookup —
  // confirm against the code that fills it.
  mz_zip_array m_sorted_central_dir_offsets;
  MZ_FILE *m_pFile; // backing file stream (file-based archives)
  void *m_pMem;     // backing memory block (memory-based archives)
  size_t m_mem_size;
  size_t m_mem_capacity;
};
// Sets the array's element size without touching its storage; must be called
// before the first reserve/resize (ensure_capacity asserts on it).
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
// Unchecked typed element access (lvalue); index must be within m_size.
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]
// Releases the array's storage through the archive's free callback and
// resets every field (pointer, size, capacity, element size) to zero.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(*pArray));
}
// Ensures pArray can hold at least min_new_capacity elements, reallocating if
// needed. When `growing` is nonzero the capacity doubles until the request
// fits (amortized O(1) appends); otherwise it is sized exactly.
// Returns MZ_FALSE only if the realloc callback fails.
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity)
    return MZ_TRUE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity)
      new_capacity *= 2;
  }
  // NOTE(review): new_capacity * m_element_size is formed inside the realloc
  // callback; presumably callers keep requests small enough that this cannot
  // overflow size_t -- verify for pathological archive sizes.
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                         pArray->m_element_size, new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}
// Guarantees capacity for new_capacity elements; a no-op when the array
// already has room. Does not change m_size.
static MZ_FORCEINLINE mz_bool
mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray,
                     size_t new_capacity, mz_uint growing) {
  if (new_capacity <= pArray->m_capacity)
    return MZ_TRUE;
  return mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing);
}
// Sets the array's logical size, growing the backing store first when the
// requested size exceeds the current capacity. New elements are NOT
// initialized. Returns MZ_FALSE on allocation failure (size unchanged).
static MZ_FORCEINLINE mz_bool
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size,
                    mz_uint growing) {
  if ((new_size > pArray->m_capacity) &&
      (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)))
    return MZ_FALSE;
  pArray->m_size = new_size;
  return MZ_TRUE;
}
// Reserves space for n elements beyond the current size (geometric growth).
static MZ_FORCEINLINE mz_bool
mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) {
  const size_t needed = pArray->m_size + n;
  return mz_zip_array_reserve(pZip, pArray, needed, MZ_TRUE);
}
// Appends n elements copied from pElements, growing the array on demand.
// Returns MZ_FALSE on allocation failure (array contents unchanged).
static MZ_FORCEINLINE mz_bool
mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray,
                       const void *pElements, size_t n) {
  const size_t old_size = pArray->m_size;
  mz_uint8 *pDst;
  if (!mz_zip_array_resize(pZip, pArray, old_size + n, MZ_TRUE))
    return MZ_FALSE;
  pDst = (mz_uint8 *)pArray->m_p + old_size * pArray->m_element_size;
  memcpy(pDst, pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Converts MS-DOS packed time/date words to a time_t via mktime() (local
// time; tm_isdst = -1 lets libc decide DST).
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm bd;
  memset(&bd, 0, sizeof(bd));
  bd.tm_sec = (dos_time << 1) & 62;        // 2-second granularity
  bd.tm_min = (dos_time >> 5) & 63;
  bd.tm_hour = (dos_time >> 11) & 31;
  bd.tm_mday = dos_date & 31;
  bd.tm_mon = ((dos_date >> 5) & 15) - 1;  // DOS months are 1-based
  bd.tm_year = (((dos_date >> 9) & 127) + 1980) - 1900; // DOS epoch is 1980
  bd.tm_isdst = -1;
  return mktime(&bd);
}
// Converts a time_t to MS-DOS packed time/date words (local time). On any
// conversion failure both outputs are set to 0.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
  // Bug fix: localtime() returns NULL for out-of-range time_t values; the
  // old code dereferenced the result unconditionally. Mirror the MSVC error
  // path instead of crashing.
  if (!tm) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#endif
  // DOS time: hhhhhmmmmmmsssss (seconds stored /2); DOS date:
  // yyyyyyymmmmddddd with year relative to 1980.
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
// Stats pFilename and converts its mtime to DOS time/date words.
// With MINIZ_NO_TIME defined the outputs are zeroed and the call always
// succeeds. Returns MZ_FALSE only when the stat itself fails.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0)
    return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
// Shared reader setup: installs default allocator callbacks, switches the
// archive into reading mode, and allocates/zeroes the internal state with the
// three central-directory arrays configured (bytes, offsets, sorted indices).
// Fails if pZip is NULL, already has state, or is not in MODE_INVALID.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  // Fill in any allocator callbacks the caller left NULL.
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Strict-weak-ordering comparator used by the heapsort: returns nonzero when
// the (case-insensitively lowercased) filename of entry l_index sorts before
// that of r_index. Filenames live immediately after each 46-byte central
// directory header inside the m_central_dir byte blob.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR =
      &MZ_ZIP_ARRAY_ELEMENT(
          pCentral_dir_array, mz_uint8,
          MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // Skip the fixed headers; the names follow directly.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  // Find the first differing (lowercased) byte within the common prefix.
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Equal prefix: the shorter name sorts first; otherwise compare the bytes.
  return (pL == pE) ? (l_len < r_len) : (l < r);
}
// Swaps two mz_uint32 lvalues; arguments are evaluated more than once, so
// avoid side effects in a/b.
#define MZ_SWAP_UINT32(a, b)                                                   \
  do {                                                                         \
    mz_uint32 t = a;                                                           \
    a = b;                                                                     \
    b = t;                                                                     \
  }                                                                            \
  MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
// Sorts the m_sorted_central_dir_offsets index table in place; the central
// directory bytes themselves are never moved.
static void
mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices =
      &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32,
                            0);
  const int size = pZip->m_total_files;
  // Phase 1: heapify -- sift down every internal node, last parent first.
  int start = (size - 2) >> 1, end;
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size)
        break;
      // Pick the larger of the two children.
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }
  // Phase 2: repeatedly move the max to the end and re-sift the root.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end)
        break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}
// Locates and parses the archive's central directory: scans backwards for the
// end-of-central-directory record, validates it, reads the whole central
// directory into memory, builds the per-file offset table, sanity-checks each
// record, and (unless suppressed by flags) sorts an index by filename.
// Rejects multi-disk archives and zip64 entries. Returns MZ_FALSE on any
// structural problem; this is the main line of defense against corrupt or
// hostile archives.
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; // 4 KB scan window
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
        break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up once we've scanned past the maximum possible comment length
    // (0xFFFF) plus the record itself.
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Step back by a window, overlapping 3 bytes so a signature straddling
    // two windows is still found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;
  // Multi-disk archives are unsupported (disk numbers must be 0, or both 1).
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;
  // The directory must be at least big enough for one header per file.
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
    return MZ_FALSE;
  pZip->m_central_directory_file_ofs = cdir_ofs;
  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;
    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }
    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;
    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // 0xFFFFFFFF sizes mark zip64 entries, which are rejected here.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1))
        return MZ_FALSE;
      // The entry's local header plus its compressed data must fit in file.
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
          n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }
  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
  return MZ_TRUE;
}
// Initializes a reader over `size` bytes served by the caller-installed
// m_pRead callback, then parses the central directory. On parse failure all
// reader state is torn down before returning MZ_FALSE.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((pZip == NULL) || (pZip->m_pRead == NULL))
    return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_archive_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags))
    return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
// m_pRead callback for memory-backed archives: copies up to n bytes starting
// at file_ofs out of the in-memory image, clamping at end-of-archive.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t to_copy = 0;
  if (file_ofs < pZip->m_archive_size)
    to_copy = (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, to_copy);
  return to_copy;
}
// Initializes a reader over a caller-owned memory block (not copied; it must
// stay valid for the archive's lifetime). Tears everything down if central
// directory parsing fails.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_archive_size = size;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags))
    return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#ifndef MINIZ_NO_STDIO
// m_pRead callback for file-backed archives. Seeks only when the stream is
// not already positioned at file_ofs (sequential reads avoid the syscall),
// then reads up to n bytes. Returns 0 on negative offset or seek failure.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Opens pFilename in binary mode, measures its size by seeking to the end,
// and initializes a file-backed reader. The FILE* is owned by the archive
// state afterwards; the file is closed here only on early failure.
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile)
    return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  // From here the state owns pFile; mz_zip_reader_end() will close it.
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Returns the number of files in the archive; a NULL archive reports zero.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  if (!pZip)
    return 0;
  return pZip->m_total_files;
}
// Returns a pointer to file_index's central directory header inside the
// in-memory central dir blob, or NULL when the archive/index is invalid or
// the archive is not in reading mode. The pointer remains valid until the
// archive is ended or modified.
static MZ_FORCEINLINE const mz_uint8 *
mz_zip_reader_get_cdh(mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets,
                           mz_uint32, file_index));
}
// Reports whether the entry is encrypted: bit 0 of the central directory
// general-purpose bit flag. Invalid indices report MZ_FALSE.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  if (pCdh == NULL)
    return MZ_FALSE;
  return (MZ_READ_LE16(pCdh + MZ_ZIP_CDH_BIT_FLAG_OFS) & 1) != 0;
}
// Heuristically classifies an entry as a directory: either its stored name
// ends in '/' or its external attributes carry the DOS directory bit (0x10).
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p)
    return MZ_FALSE;
  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }
  // Bugfix: This code was also checking if the internal attribute was non-zero,
  // which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in the
  // low 16-bits, so check for the DOS directory flag and ignore the source OS
  // ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0)
    return MZ_TRUE;
  return MZ_FALSE;
}
// Unpacks file_index's central directory record into *pStat: versions, flags,
// method, DOS timestamp (converted to time_t unless MINIZ_NO_TIME), CRC,
// sizes, attributes, local header offset, and truncated copies of the
// filename and comment. Returns MZ_FALSE on a bad index/archive or NULL pStat.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat))
    return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  // The comment follows the filename and extra field within the record.
  memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';
  return MZ_TRUE;
}
// Copies the entry's filename (truncated, always NUL-terminated) into
// pFilename. Returns the full name length plus one (i.e. the buffer size
// needed to hold the whole name), or 0 on a bad index/archive.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size)
      pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  // Note: returns the (possibly truncated) length + 1, counting the NUL.
  return n + 1;
}
// Compares two length-`len` byte strings for equality; case-sensitive when
// MZ_ZIP_FLAG_CASE_SENSITIVE is set, otherwise byte-wise after lowercasing.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len,
                           mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
    return memcmp(pA, pB, len) == 0;
  for (i = 0; i < len; ++i) {
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Three-way, case-insensitive comparison between entry l_index's stored
// filename and the external string pR of length r_len. Returns <0, 0, or >0
// strcmp-style; used by the binary search over the sorted index table.
static MZ_FORCEINLINE int
mz_zip_reader_filename_compare(const mz_zip_array *pCentral_dir_array,
                               const mz_zip_array *pCentral_dir_offsets,
                               mz_uint l_index, const char *pR, mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // The stored name follows the fixed 46-byte header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Equal prefix: order by length; otherwise by the first differing byte.
  return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
// Binary search over the filename-sorted index table built by
// mz_zip_reader_sort_central_dir_offsets_by_filename(). Case-insensitive,
// full-path match only. Returns the file index or -1 when not found.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices =
      &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32,
                            0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp =
            mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
                                           file_index, pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}
// Finds the index of the entry named pName (optionally also matching
// pComment). Uses the sorted-index binary search when possible (default
// case-insensitive full-path lookup with no comment filter); otherwise falls
// back to a linear scan honoring MZ_ZIP_FLAG_IGNORE_PATH and
// MZ_ZIP_FLAG_CASE_SENSITIVE. Returns -1 when not found or on bad arguments.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: the sorted index exists and the query matches its semantics.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  // ZIP name/comment fields are 16-bit lengths; longer queries cannot match.
  name_len = strlen(pName);
  if (name_len > 0xFFFF)
    return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF)
    return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader =
        &MZ_ZIP_ARRAY_ELEMENT(
            &pZip->m_pState->m_central_dir, mz_uint8,
            MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets,
                                 mz_uint32, file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len)
      continue;
    if (comment_len) {
      // The comment sits after the filename and extra field in the record.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    // IGNORE_PATH: compare only against the portion after the last
    // '/', '\\' or ':' separator.
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}
// Extracts entry file_index into the caller's buffer pBuf (buf_size bytes).
// Supports only stored and deflated entries; rejects encrypted/patch entries.
// With MZ_ZIP_FLAG_COMPRESSED_DATA the raw compressed bytes are returned and
// no CRC check is done; otherwise the data is inflated and its CRC verified.
// pUser_read_buf (optional) supplies the staging buffer for file-backed
// archives, avoiding a heap allocation here. Empty files and directory
// entries succeed immediately.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining,
      out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;
  if ((buf_size) && (!pBuf))
    return MZ_FALSE;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size)
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // The compressed payload follows the local header, filename and extra data;
  // the local header's own length fields are authoritative here.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }
  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);
  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size)
      return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size = MZ_MIN(file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  // Streaming inflate loop: refill the staging buffer as needed and
  // decompress directly into pBuf (non-wrapping output).
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Free the staging buffer only if we allocated it ourselves.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}
// Name-based wrapper: resolves pFilename to an index, then defers to the
// index-based extractor. Fails when the entry does not exist.
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int idx = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_mem_no_alloc(
                         pZip, idx, pBuf, buf_size, flags, pUser_read_buf,
                         user_read_buf_size);
}
// Convenience wrapper: extract by index with no caller-supplied read buffer
// (one is heap-allocated internally when needed).
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
                                               flags, NULL, 0);
}
// Convenience wrapper: extract by name with no caller-supplied read buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}
// Extracts entry file_index into a freshly allocated heap buffer (caller
// frees via the archive's m_pFree). *pSize receives the byte count on
// success, 0 otherwise. Returns NULL on any failure. With
// MZ_ZIP_FLAG_COMPRESSED_DATA the buffer holds the raw compressed bytes.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize)
    *pSize = 0;
  if (!p)
    return NULL;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
  // Refuse allocations that cannot be represented in a 32-bit size_t.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize)
    *pSize = (size_t)alloc_size;
  return pBuf;
}
// Name-based wrapper around mz_zip_reader_extract_to_heap(): locates the
// entry by name, then extracts it to a heap buffer. Returns NULL (and zeroes
// *pSize) when the entry is not found.
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize)
      *pSize = 0;
    // Bug fix: this function returns a pointer, but the old code returned
    // MZ_FALSE (an int 0) here -- it only worked because 0 converts to NULL.
    return NULL;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size)
return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index))
return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32))
return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size = MZ_MIN(file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem)
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf)
pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
// Locates pFilename in the archive's central directory and streams its
// contents through pCallback. Returns MZ_FALSE if the name cannot be found;
// otherwise forwards the result of mz_zip_reader_extract_to_callback().
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int idx = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_callback(pZip, idx, pCallback,
                                                       pOpaque, flags);
}
#ifndef MINIZ_NO_STDIO
// stdio-backed mz_file_write_func: writes n bytes to the FILE* passed in
// pOpaque. ofs is ignored - the stream position is already correct for
// sequential extraction.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  MZ_FILE *pDst = (MZ_FILE *)pOpaque;
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, pDst);
}
// Extracts the archive entry file_index to the filesystem path pDst_filename.
// On success (when time support is compiled in) the destination file's
// access/modification times are set to the entry's stored timestamp.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pDst;
  mz_bool success;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  if (NULL == (pDst = MZ_FOPEN(pDst_filename, "wb")))
    return MZ_FALSE;
  success = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pDst, flags);
  // A failing fclose() means buffered data may have been lost - report
  // failure even if extraction itself succeeded.
  if (MZ_FCLOSE(pDst) == EOF)
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (success)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return success;
}
#endif // #ifndef MINIZ_NO_STDIO
// Tears down a reader-mode archive: frees the central directory arrays,
// closes any stdio stream owned by the internal state, releases the state
// block, and marks the archive invalid. Returns MZ_FALSE if the archive is
// not a valid reader.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // Detach the state first so the archive is never left pointing at freed
  // memory.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif // #ifndef MINIZ_NO_STDIO
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Convenience wrapper: looks up pArchive_filename in the central directory
// and, if found, extracts that entry to the path pDst_filename.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int idx = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_file(pZip, idx, pDst_filename,
                                                   flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Stores v at p in little-endian byte order, independent of host endianness.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
}
// Stores v at p in little-endian byte order, independent of host endianness.
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  int i;
  for (i = 0; i < 4; i++)
    p[i] = (mz_uint8)(v >> (i * 8));
}
// Convenience wrappers that cast the destination pointer and value to the
// exact types expected by the little-endian store helpers above.
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
// Initializes a zip archive for writing. The caller must have already set
// m_pWrite (and may set custom allocation callbacks); existing_size is the
// amount of data already present at the start of the output, which new
// entries will be appended after. Returns MZ_FALSE on a bad argument, a
// non-power-of-2 m_file_offset_alignment, or allocation failure.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }
  // Fall back to the default CRT-based allocators for any callback the user
  // left unset.
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  // The central directory is kept in memory as a byte array plus an array of
  // per-entry offsets into it.
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Growable-heap mz_write_func: backs the archive with a realloc'd memory
// block owned by the internal state (installed by mz_zip_writer_init_heap).
// Returns n on success, 0 on failure.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
  // Reject zero-length writes and, on 32-bit size_t targets, sizes that
  // cannot be represented. (The "(0, ...)" comma form suppresses MSVC's
  // constant-conditional warning.)
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Grow geometrically (doubling from at least 64 bytes) to amortize
    // realloc cost.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}
// Initializes a writer that builds the archive entirely in heap memory
// (retrieved later via the heap-finalize API). size_to_reserve_at_beginning
// bytes are left untouched at the start of the archive;
// initial_allocation_size pre-sizes the backing buffer to avoid early
// reallocs. Returns MZ_FALSE on bad arguments or allocation failure.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  // Fix: guard against a NULL archive pointer - the assignments below would
  // otherwise dereference it before mz_zip_writer_init() could reject it.
  if (!pZip)
    return MZ_FALSE;
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      // Roll back the partial init so the archive is left in a clean state.
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// stdio-backed mz_write_func for writer-mode archives: seeks to file_ofs
// (only when the stream is not already positioned there) and writes n bytes.
// Returns the number of bytes written, or 0 on a bad offset / seek failure.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  MZ_FILE *pFile = pZip->m_pState->m_pFile;
  mz_int64 cur_ofs = MZ_FTELL64(pFile);
  mz_int64 target_ofs = (mz_int64)file_ofs;
  if (target_ofs < 0)
    return 0;
  if ((cur_ofs != target_ofs) && (MZ_FSEEK64(pFile, target_ofs, SEEK_SET)))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pFile);
}
// Initializes a writer that streams the archive to the file pFilename
// (opened "wb"). size_to_reserve_at_beginning zero bytes are written first
// and left for the caller to fill in later. Returns MZ_FALSE on bad
// arguments, open failure, or a short write while reserving space.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  // Fix: guard against a NULL archive pointer - the assignments below would
  // otherwise dereference it before mz_zip_writer_init() could reject it.
  if (!pZip)
    return MZ_FALSE;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    // Roll back the partial init so the archive is left in a clean state.
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Reserve the requested prefix by writing zeros in 4KB chunks.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Converts an open reader-mode archive into writer mode for in-place
// appending. Depending on how the reader was backed this reopens the stdio
// stream writable (pFilename required), switches a heap block to the
// growable heap writer, or relies on a user-supplied m_pWrite. New entries
// are written starting at the old central directory, which will be
// regenerated on finalize.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support max
  // size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    // Fix: cast to void to suppress the unused-parameter warning; the bare
    // statement "pFilename;" had no effect and drew its own warnings.
    (void)pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    if (!pFilename)
      return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}
// Adds a memory buffer to the archive under pArchive_name using the current
// time, no comment, and no precomputed size/CRC. Thin wrapper around
// mz_zip_writer_add_mem_ex().
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
                                  level_and_flags, 0, 0);
}
// Bookkeeping passed (as pUser) to mz_zip_writer_add_put_buf_callback while
// tdefl streams compressed output into the archive being written.
typedef struct {
  mz_zip_archive *m_pZip;           // destination archive
  mz_uint64 m_cur_archive_file_ofs; // next write offset within the archive
  mz_uint64 m_comp_size;            // running total of compressed bytes emitted
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
// Serializes a ZIP local file header into pDst (exactly
// MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes), little-endian regardless of host.
// "Version needed to extract" is 2.0 when the entry is deflated, 0 when
// stored. Always returns MZ_TRUE.
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  // No zip64 support here: 32-bit truncating stores of the 64-bit sizes.
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}
// Serializes a ZIP central directory header into pDst (exactly
// MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes), little-endian regardless of host.
// Mirrors the local header fields and additionally records the comment
// length, external attributes, and the local header's archive offset.
// Always returns MZ_TRUE.
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  // No zip64 support here: 32-bit truncating stores of the 64-bit values.
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}
// Appends one central directory record (header + filename + extra + comment)
// to the in-memory central directory and records its starting offset in
// m_central_dir_offsets. On any push failure the central directory is rolled
// back to its original size and MZ_FALSE is returned.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  // Fix: the last push_back's argument had been corrupted to the mojibake
  // "¢ral_dir_ofs" (an HTML-entity mangling of "&cent..."); it must be the
  // address of central_dir_ofs.
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Basic ZIP archive filename validity checks: valid names cannot start with
// a forward slash, cannot contain a drive letter separator (':'), and cannot
// use DOS-style backslashes.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  const char *p = pArchive_name;
  if (*p == '/')
    return MZ_FALSE;
  for (; *p; p++) {
    if ((*p == '\\') || (*p == ':'))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Returns how many zero bytes must precede the next local header so that it
// lands on an m_file_offset_alignment boundary. The alignment is a power of
// two (validated in mz_zip_writer_init); 0 disables alignment entirely.
static mz_uint
mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip) {
  mz_uint32 mask, ofs_in_block;
  if (!pZip->m_file_offset_alignment)
    return 0;
  mask = (mz_uint32)(pZip->m_file_offset_alignment - 1);
  ofs_in_block = (mz_uint32)(pZip->m_archive_size & mask);
  return (pZip->m_file_offset_alignment - ofs_in_block) & mask;
}
// Writes n zero bytes to the archive starting at cur_file_ofs, chunking
// through a small stack buffer. Returns MZ_FALSE on a short write.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char zeros[4096];
  memset(zeros, 0, MZ_MIN(sizeof(zeros), n));
  while (n > 0) {
    mz_uint32 chunk = MZ_MIN(sizeof(zeros), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, zeros, chunk) != chunk)
      return MZ_FALSE;
    cur_file_ofs += chunk;
    n -= chunk;
  }
  return MZ_TRUE;
}
// Adds the memory buffer pBuf/buf_size to the archive under pArchive_name,
// with an optional comment. level_and_flags is a compression level
// (0..MZ_UBER_COMPRESSION) OR'd with zero or more MZ_ZIP_FLAG_* bits; when
// MZ_ZIP_FLAG_COMPRESSED_DATA is set, pBuf already holds deflate data and
// uncomp_size/uncomp_crc32 must describe the original payload. Returns
// MZ_TRUE on success.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  // NOTE(review): pZip is dereferenced in these initializers before the NULL
  // check below - callers must not pass NULL.
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // A caller-supplied uncomp_size only makes sense for pre-compressed input.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  // Stamp the entry with the current wall-clock time in DOS format.
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size))
      return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(pZip, &pState->m_central_dir,
                                 MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                                     archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // Reserve space for the alignment padding plus the local header; the header
  // is rewritten with real values after the data is known.
  if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs,
                                 num_alignment_padding_bytes +
                                     sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // We compress the data ourselves, so compute the CRC here; payloads of
    // 3 bytes or fewer aren't worth deflating.
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Pre-compressed data is recorded as deflated even though we copy it raw.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
      method = MZ_DEFLATED;
  } else if (buf_size) {
    // Deflate the buffer; compressed bytes are streamed into the archive via
    // mz_zip_writer_add_put_buf_callback.
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Go back and fill in the real local header, then record the entry in the
  // in-memory central directory.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Adds the on-disk file pSrc_filename to the archive as pArchive_name,
// streaming it through a fixed-size I/O buffer so large (< 4GB) files can be
// added without loading them into memory. The entry is stamped with the
// source file's modification time. MZ_ZIP_FLAG_COMPRESSED_DATA is not
// supported here. Returns MZ_TRUE on success.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  // NOTE(review): pZip is dereferenced in these initializers before the NULL
  // check below - callers must not pass NULL.
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed input is only supported by mz_zip_writer_add_mem_ex().
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file)
    return MZ_FALSE;
  // Determine the source size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Files of 3 bytes or fewer aren't worth deflating - store them.
  if (uncomp_size <= 3)
    level = 0;
  // Reserve space for the alignment padding plus the local header; the header
  // is rewritten with real values after the data is known.
  if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs,
                                 num_alignment_padding_bytes +
                                     sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Store: copy the file through in chunks, computing the CRC as we go.
      while (uncomp_remaining) {
        mz_uint n = (mz_uint)MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
                            n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflate: feed the file through tdefl, which streams compressed bytes
      // into the archive via mz_zip_writer_add_put_buf_callback.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size =
            (mz_uint32)MZ_MIN(uncomp_remaining, MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size,
                                       uncomp_remaining ? TDEFL_NO_FLUSH
                                                        : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Go back and fill in the real local header, then record the entry in the
  // in-memory central directory.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Copies the entry file_index from pSource_zip into pZip without
// recompressing: the local header, filename/extra data, raw (possibly
// compressed) payload, optional data descriptor, and the central directory
// record are transferred verbatim; only the local header offset in the
// cloned central record is rewritten. Returns MZ_TRUE on success.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  // mz_uint32 backing keeps the local header buffer suitably aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if (NULL ==
      (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;
  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;
  // Read and sanity-check the source entry's local header.
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // The filename + extra field immediately follow the local header; copy
  // them together with the compressed payload.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  // The buffer must hold at least a 4-dword data descriptor (see below).
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                             (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                                            MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE,
                                                   comp_bytes_remaining)))))
    return MZ_FALSE;
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }
  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    // The descriptor may or may not begin with the optional PK\x07\x08
    // signature; copy 4 dwords when present, 3 otherwise.
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF)
    return MZ_FALSE;
  orig_central_dir_size = pState->m_central_dir.m_size;
  // Clone the source central record, patching only the local header offset.
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;
  // Append the variable-length tail (filename + extra + comment) verbatim.
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  if (pState->m_central_dir.m_size > 0xFFFFFFFF)
    return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;
  return MZ_TRUE;
}
// Writes the central directory followed by the end-of-central-directory
// record, then moves the archive into the finalized state.
// Returns MZ_FALSE on parameter/state errors, classic-ZIP limit overflow
// (no zip64 support), or I/O failure.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // no zip64 support yet: entry count must fit 16 bits and the final
  // archive size must fit 32 bits.
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory (accumulated in memory during adds).
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }
  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  // For file-backed archives, flush so the record actually reaches disk.
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
    return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
  pZip->m_archive_size += sizeof(hdr);
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}
// Finalize a heap-backed archive and transfer the buffer to the caller.
// On success *pBuf/*pSize receive the archive memory; ownership moves to
// the caller and the internal state is detached from it.
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize))
    return MZ_FALSE;
  // Only meaningful for archives that were writing into the heap.
  if (pZip->m_pWrite != mz_zip_heap_write_func)
    return MZ_FALSE;
  if (!mz_zip_writer_finalize_archive(pZip))
    return MZ_FALSE;
  mz_zip_internal_state *pState = pZip->m_pState;
  *pBuf = pState->m_pMem;
  *pSize = pState->m_mem_size;
  // Detach the buffer so mz_zip_writer_end() won't free it.
  pState->m_pMem = NULL;
  pState->m_mem_size = 0;
  pState->m_mem_capacity = 0;
  return MZ_TRUE;
}
// Tears down a writer-mode archive: frees the central directory arrays,
// closes any stdio file, releases the heap buffer (if still owned), frees
// the internal state, and invalidates the archive object.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_bool status = MZ_TRUE;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
       (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // Detach the state first so the archive object cannot be reused mid-teardown.
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif // #ifndef MINIZ_NO_STDIO
  // Heap-backed archives still owning memory (finalize_heap_archive not
  // called, or caller abandoned it) release the block here.
  if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
    pState->m_pMem = NULL;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return status;
}
#ifndef MINIZ_NO_STDIO
// Convenience helper: add one in-memory buffer to a ZIP file on disk,
// creating the archive if it does not exist or appending if it does.
// Always attempts to finalize so the file ends with a valid central
// directory; deletes the file only if it was newly created and something
// failed. Returns MZ_TRUE on full success.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags) {
  mz_bool status, created_new_archive = MZ_FALSE;
  mz_zip_archive zip_archive;
  struct MZ_FILE_STAT_STRUCT file_stat;
  MZ_CLEAR_OBJ(zip_archive);
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  // Validate: buffer/comment pointers must be present when their sizes are
  // non-zero, and the low nibble (compression level) must be in range.
  if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
      ((comment_size) && (!pComment)) ||
      ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
  if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
    // Create a new archive.
    if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
      return MZ_FALSE;
    created_new_archive = MZ_TRUE;
  } else {
    // Append to an existing archive: open it as a reader, then convert the
    // reader into a writer positioned before the old central directory.
    if (!mz_zip_reader_init_file(&zip_archive, pZip_filename,
                                 level_and_flags |
                                     MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return MZ_FALSE;
    if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
      mz_zip_reader_end(&zip_archive);
      return MZ_FALSE;
    }
  }
  status =
      mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
                               pComment, comment_size, level_and_flags, 0, 0);
  // Always finalize, even if adding failed for some reason, so we have a valid
  // central directory. (This may not always succeed, but we can try.)
  if (!mz_zip_writer_finalize_archive(&zip_archive))
    status = MZ_FALSE;
  if (!mz_zip_writer_end(&zip_archive))
    status = MZ_FALSE;
  if ((!status) && (created_new_archive)) {
    // It's a new archive and something went wrong, so just delete it.
    int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
    (void)ignoredStatus;
  }
  return status;
}
// Convenience helper: open a ZIP file, locate pArchive_name, and extract it
// to a freshly allocated heap buffer. Returns the buffer (caller frees via
// the archive's allocator, mz_free by default) or NULL on any failure;
// *pSize receives the uncompressed size.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint flags) {
  int file_index;
  mz_zip_archive zip_archive;
  void *p = NULL;
  if (pSize)
    *pSize = 0;
  if ((!pZip_filename) || (!pArchive_name))
    return NULL;
  MZ_CLEAR_OBJ(zip_archive);
  if (!mz_zip_reader_init_file(&zip_archive, pZip_filename,
                               flags |
                                   MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
    return NULL;
  if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
                                              flags)) >= 0)
    p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
  // Close the reader regardless of whether extraction succeeded.
  mz_zip_reader_end(&zip_archive);
  return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
}
// Detect host byte order at runtime by viewing a known 32-bit pattern as
// raw bytes: on a big-endian machine the first byte of 0x01020304 is 0x01.
bool IsBigEndian(void) {
  union {
    unsigned int i;
    char c[4];
  } probe = {0x01020304};
  return probe.c[0] == 1;
}
// Reverse the two bytes of *val in place (endianness swap).
void swap2(unsigned short *val) {
  unsigned char *b = (unsigned char *)val;
  unsigned char t = b[0];
  b[0] = b[1];
  b[1] = t;
}
// Reverse the four bytes of *val in place (endianness swap).
void swap4(unsigned int *val) {
  unsigned char *b = (unsigned char *)val;
  for (int i = 0; i < 2; i++) {
    unsigned char t = b[i];
    b[i] = b[3 - i];
    b[3 - i] = t;
  }
}
// Reverse the eight bytes of *val in place (endianness swap).
void swap8(unsigned long long *val) {
  unsigned char *b = (unsigned char *)val;
  for (int i = 0; i < 4; i++) {
    unsigned char t = b[i];
    b[i] = b[7 - i];
    b[7 - i] = t;
  }
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
// IEEE-754 single-precision float viewed three ways: raw bits (u), the
// float value (f), or sign/exponent/mantissa bitfields (s). Bitfield order
// depends on host endianness; reuses miniz's MINIZ_LITTLE_ENDIAN flag.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
// IEEE-754 half-precision value: raw 16 bits (u) or
// sign(1)/exponent(5)/mantissa(10) bitfields (s), ordered per endianness.
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
// Convert half-precision bits to single precision, handling zero/denormal
// and Inf/NaN. Bit-manipulation technique from the gist referenced above
// (rygorous); the "magic" constant renormalizes denormal inputs.
FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13; // exponent mask after shift
  FP32 o;
  o.u = (h.u & 0x7fff) << 13;            // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u; // just the exponent
  o.u += (127 - 15) << 23;               // exponent adjust (rebias 15 -> 127)
  // handle exponent special cases
  if (exp_ == shifted_exp)   // Inf/NaN?
    o.u += (128 - 16) << 23; // extra exp adjust
  else if (exp_ == 0)        // Zero/Denormal?
  {
    o.u += 1 << 23; // extra exp adjust
    o.f -= magic.f; // renormalize via float subtraction
  }
  o.u |= (h.u & 0x8000) << 16; // sign bit
  return o;
}
// Convert single precision to half precision with round-to-nearest.
// Handles signed zero/denormals (underflow), Inf/NaN (NaN -> quiet NaN),
// overflow to Inf, and gradual underflow into half denormals.
FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};
  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
  } else                                     // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31) // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0) // Underflow
    {
      if ((14 - newexp) <= 24) // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1) // Check for rounding
          o.u++; // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = newexp;
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000) // Check for rounding
        o.u++;                   // Round, might overflow to inf, this is OK
    }
  }
  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
// Read a NUL-terminated string starting at ptr into s (terminator not
// stored) and return a pointer just past the '\0'.
const char *ReadString(std::string &s, const char *ptr) {
  const char *end = ptr;
  while (*end != 0) {
    end++;
  }
  s.assign(ptr, end);
  return end + 1; // skip '\0'
}
// Parse one EXR attribute at ptr: NUL-terminated name, NUL-terminated type,
// 4-byte little-endian payload length, then the payload bytes (copied into
// data). Returns a pointer past the attribute, or NULL at the end-of-header
// marker (a single 0x00 byte) or on a corrupt (negative) length.
const char *ReadAttribute(std::string &name, std::string &ty,
                          std::vector<unsigned char> &data, const char *ptr) {
  if ((*ptr) == 0) {
    // end of attribute.
    return NULL;
  }
  const char *p = ReadString(name, ptr);
  p = ReadString(ty, p);
  int dataLen;
  memcpy(&dataLen, p, sizeof(int));
  p += 4;
  if (IsBigEndian()) {
    swap4(reinterpret_cast<unsigned int *>(&dataLen));
  }
  // Reject corrupt headers: a negative length would previously be passed
  // to resize()/memcpy() with undefined results.
  if (dataLen < 0) {
    return NULL;
  }
  data.resize(dataLen);
  // Guard the copy: the original unconditionally called data.at(0), which
  // throws std::out_of_range when the payload length is zero.
  if (dataLen > 0) {
    memcpy(&data.at(0), p, dataLen);
  }
  p += dataLen;
  return p;
}
// Write one EXR attribute to fp: name NUL, type NUL, little-endian 4-byte
// length, then len raw payload bytes.
void WriteAttribute(FILE *fp, const char *name, const char *type,
                    const unsigned char *data, int len) {
  const size_t nameBytes = strlen(name) + 1; // include '\0'
  const size_t typeBytes = strlen(type) + 1; // include '\0'
  size_t n = fwrite(name, 1, nameBytes, fp);
  assert(n == nameBytes);
  n = fwrite(type, 1, typeBytes, fp);
  assert(n == typeBytes);
  int outLen = len;
  if (IsBigEndian()) {
    // File format stores the length little-endian; swap on BE hosts.
    swap4(reinterpret_cast<unsigned int *>(&outLen));
  }
  n = fwrite(&outLen, 1, sizeof(int), fp);
  assert(n == sizeof(int));
  n = fwrite(data, 1, len, fp);
  assert(n == (size_t)len);
  (void)n;
}
// Append one EXR attribute to a byte vector: name NUL, type NUL,
// little-endian 4-byte length, then len raw payload bytes.
void WriteAttributeToMemory(std::vector<unsigned char> &out, const char *name,
                            const char *type, const unsigned char *data,
                            int len) {
  out.insert(out.end(), name, name + strlen(name) + 1);
  out.insert(out.end(), type, type + strlen(type) + 1);
  int outLen = len;
  if (IsBigEndian()) {
    // Stored little-endian on disk; swap on BE hosts.
    swap4(reinterpret_cast<unsigned int *>(&outLen));
  }
  const unsigned char *lenBytes = reinterpret_cast<unsigned char *>(&outLen);
  out.insert(out.end(), lenBytes, lenBytes + sizeof(int));
  out.insert(out.end(), data, data + len);
}
// One entry of the EXR channel list ("chlist") attribute.
typedef struct {
  std::string name; // less than 255 bytes long
  int pixelType;    // EXR pixel type enum (presumably 0=UINT, 1=HALF,
                    // 2=FLOAT per OpenEXR -- TODO confirm against spec)
  unsigned char pLinear; // perceptual-linearity hint byte from the file
  int xSampling;         // horizontal subsampling factor
  int ySampling;         // vertical subsampling factor
} ChannelInfo;
// Parse the value bytes of an EXR "chlist" attribute into ChannelInfo
// records. The list is a sequence of entries -- name NUL, pixelType(int),
// pLinear(uchar) + 3 reserved bytes, xSampling(int), ySampling(int) --
// terminated by a single 0x00 byte.
void ReadChannelInfo(std::vector<ChannelInfo> &channels,
                     const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));
  for (;;) {
    if ((*p) == 0) {
      // End-of-list terminator.
      break;
    }
    ChannelInfo info;
    p = ReadString(info.name, p);
    memcpy(&info.pixelType, p, sizeof(int));
    p += 4;
    info.pLinear = p[0]; // uchar
    p += 1 + 3;          // reserved: uchar[3]
    memcpy(&info.xSampling, p, sizeof(int)); // int
    p += 4;
    memcpy(&info.ySampling, p, sizeof(int)); // int
    p += 4;
    if (IsBigEndian()) {
      // Fields are little-endian on disk; swap on big-endian hosts.
      swap4(reinterpret_cast<unsigned int *>(&info.pixelType));
      swap4(reinterpret_cast<unsigned int *>(&info.xSampling));
      swap4(reinterpret_cast<unsigned int *>(&info.ySampling));
    }
    channels.push_back(info);
  }
}
// Serialize ChannelInfo records into the wire format of an EXR "chlist"
// attribute value (inverse of ReadChannelInfo), ending with a 0x00 byte.
void WriteChannelInfo(std::vector<unsigned char> &data,
                      const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;
  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
    sz += 16;                                   // 4 * int
  }
  // +1 for the final list terminator. vector::resize value-initializes,
  // so the 3 reserved bytes after pLinear (skipped below) stay zero.
  data.resize(sz + 1);
  unsigned char *p = &data.at(0);
  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;
    // Copy the int fields so the originals survive a big-endian swap.
    int pixelType = channels[c].pixelType;
    int xSampling = channels[c].xSampling;
    int ySampling = channels[c].ySampling;
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&pixelType));
      swap4(reinterpret_cast<unsigned int *>(&xSampling));
      swap4(reinterpret_cast<unsigned int *>(&ySampling));
    }
    memcpy(p, &pixelType, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].pLinear;
    p += 4; // 1 byte pLinear + 3 reserved (zero) bytes
    memcpy(p, &xSampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &ySampling, sizeof(int));
    p += sizeof(int);
  }
  (*p) = '\0'; // list terminator
}
// Deflate-compress a block of EXR scanline data, first applying OpenEXR's
// ZIP preprocessing (byte de-interleave + delta predictor) so the deflate
// stage compresses better. dst must hold at least
// miniz::mz_compressBound(srcSize) bytes; compressedSize gets the actual
// output size.
void CompressZip(unsigned char *dst, unsigned long long &compressedSize,
                 const unsigned char *src, unsigned long srcSize) {
  std::vector<unsigned char> tmpBuf(srcSize);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  //
  // Reorder the pixel data: even-indexed source bytes go to the first half
  // of tmpBuf, odd-indexed bytes to the second half.
  //
  {
    char *t1 = (char *)&tmpBuf.at(0);
    char *t2 = (char *)&tmpBuf.at(0) + (srcSize + 1) / 2;
    const char *stop = (const char *)src + srcSize;
    while (true) {
      if ((const char *)src < stop)
        *(t1++) = *(src++);
      else
        break;
      if ((const char *)src < stop)
        *(t2++) = *(src++);
      else
        break;
    }
  }
  //
  // Predictor: replace each byte with its (biased) delta from the previous
  // byte, which turns smooth gradients into runs of similar values.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + srcSize;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = d;
      ++t;
    }
  }
  //
  // Compress the data using miniz
  //
  miniz::mz_ulong outSize = miniz::mz_compressBound(srcSize);
  int ret = miniz::mz_compress(dst, &outSize,
                               (const unsigned char *)&tmpBuf.at(0), srcSize);
  assert(ret == miniz::MZ_OK);
  (void)ret;
  compressedSize = outSize;
}
// Inverse of CompressZip: inflate with miniz, undo the delta predictor,
// then re-interleave the two halves back into the original byte order.
// uncompressedSize is in/out: expected size on entry (sizes tmpBuf and
// bounds the loops), actual inflated size on return (via mz_uncompress).
void DecompressZip(unsigned char *dst, unsigned long &uncompressedSize,
                   const unsigned char *src, unsigned long srcSize) {
  std::vector<unsigned char> tmpBuf(uncompressedSize);
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), &uncompressedSize, src, srcSize);
  assert(ret == miniz::MZ_OK);
  (void)ret;
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  // Predictor: running sum undoes the biased deltas written by CompressZip.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressedSize;
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = d;
      ++t;
    }
  }
  // Reorder the pixel data: alternately pull from the first and second
  // halves of tmpBuf to rebuild the interleaved stream in dst.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressedSize + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressedSize;
    while (true) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
}
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel bookkeeping for the PIZ codec: a [start, end) cursor into the
// unsigned-short working buffer plus the channel's dimensions.
struct PIZChannelData {
  unsigned short *start; // current read/write position for this channel
  unsigned short *end;   // one past the channel's last value
  int nx;                // samples per row
  int ny;                // number of rows
  int ys;                // y sampling rate
  int size;              // shorts per sample -- presumably 1 for HALF,
                         // 2 for FLOAT/UINT; TODO confirm
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
#if 0 // @todo
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = a;
short bs = b;
short ms = (as + bs) >> 1;
short ds = as - bs;
l = ms;
h = ds;
}
#endif
// Inverse of wenc14 (14-bit wavelet step): reconstruct the pair (a, b)
// from l = rounded average and h = difference (a - b).
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const short ls = (short)l;
  const short hs = (short)h;
  const int hi = hs;
  const int ai = ls + (hi & 1) + (hi >> 1);
  const short as = (short)ai;
  const short bs = (short)(ai - hi);
  a = (unsigned short)as;
  b = (unsigned short)bs;
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the modulo-arithmetic (full 16-bit) wavelet variant.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
//const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
#if 0 // @todo
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;
  if (d < 0)
    m = (m + M_OFFSET) & MOD_MASK;
  d &= MOD_MASK;
  l = m;
  h = d;
}
#endif
// Inverse of wenc16: recover (a, b) from the average/difference pair,
// working modulo 2^16 so full 16-bit inputs round-trip.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int m = l;
  const int d = h;
  const int bb = (m - (d >> 1)) & MOD_MASK;
  const int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = (unsigned short)bb;
  a = (unsigned short)aa;
}
//
// 2D Wavelet encoding:
//
#if 0 // @todo
void wav2Encode(unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
// Hierachical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
#endif
//
// 2D Wavelet decoding:
//
void wav2Decode(unsigned short *in, // io: values are transformed in place
                int nx,             // i : x size
                int ox,             // i : x offset
                int ny,             // i : y size
                int oy,             // i : y offset
                unsigned short mx)  // i : maximum in[x][y] value
{
  // Inverse 2D Haar-style wavelet transform, in place, from the coarsest
  // level down to level 0. Chooses the 14-bit basis (wdec14) when all
  // values fit in 14 bits, otherwise the modulo-16-bit basis (wdec16).
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level: largest power of two not exceeding the smaller
  // dimension; decoding then walks levels in the reverse of encoding.
  //
  while (p <= n)
    p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        // The 2x2 block at this level: px (top-left), p01 (top-right),
        // p10 (bottom-left), p11 (bottom-right).
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
// Huffman coder parameters: 16-bit symbols, with a 14-bit primary lookup
// table for decoding (longer codes fall back to per-entry lists).
const int HUF_ENCBITS = 16;                     // literal (value) bit length
const int HUF_DECBITS = 14;                     // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;       // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
// One slot of the primary decoding table.
struct HufDec { // short code long code
  //-------------------------------
  int len : 8;  // code length 0
  int lit : 24; // lit p size
  int *p;       // 0 lits
};
// Packed code layout: low 6 bits hold the bit length, the rest the code.
inline long long hufLength(long long code) {
  return code & 63; // extract the bit-length field
}
inline long long hufCode(long long code) {
  return code >> 6; // extract the canonical code bits
}
#if 0
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8)
*out++ = (c >> (lc -= 8));
}
#endif
// Pull nBits from the MSB-first bit stream. c/lc are the caller's
// accumulator and bit count; in advances as bytes are consumed.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  // Refill the accumulator one byte at a time until it holds enough bits.
  for (; lc < nBits; lc += 8) {
    c = (c << 8) | (long long)(*(const unsigned char *)(in++));
  }
  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// Build canonical codes in place: on entry hcode[i] holds only the code
// LENGTH for symbol i; on exit it holds length | (canonical code << 6).
// See the block comment above for the canonical-code rules.
void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];
  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i)
    n[i] = 0;
  for (int i = 0; i < HUF_ENCSIZE; ++i)
    n[hcode[i]] += 1;
  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  // (n[] is reused: counts in, first-code-per-length out.)
  //
  long long c = 0;
  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }
  //
  // hcode[i] contains the length, l, of the
  // code for symbol i. Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = hcode[i];
    if (l > 0)
      hcode[i] = l | (n[l]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
#if 0 // @todo
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
int hlink[HUF_ENCSIZE];
long long *fHeap[HUF_ENCSIZE];
*im = 0;
while (!frq[*im])
(*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
long long scode[HUF_ENCSIZE];
memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m; true; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm; true; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j)
break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode);
memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}
#endif
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
//const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
#if 0
void hufPackEncTable(const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0)
break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0)
*p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
#endif
//
// Unpack an encoding table packed by hufPackEncTable():
//
// Reads 6-bit code lengths (with run-length-encoded zero runs: 59..62 =
// short runs, 63 + 8 bits = long runs) for symbols im..iM, then converts
// lengths to canonical codes. Returns false on truncated input or a zero
// run that would overflow the [im, iM] range.
bool hufUnpackEncTable(const char **pcode, // io: ptr to packed table (updated)
                       int ni,             // i : input size (in bytes)
                       int im,             // i : min hcode index
                       int iM,             // i : max hcode index
                       long long *hcode)   // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  const char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    // NOTE(review): bounds check tolerates p - *pcode == ni; verify the
    // off-by-one against upstream before tightening.
    if (p - *pcode > ni) {
      return false;
    }
    long long l = hcode[im] = getBits(6, c, lc, p); // code length
    if (l == (long long)LONG_ZEROCODE_RUN) {
      // Long zero run: next 8 bits give (run length - SHORTEST_LONG_RUN).
      if (p - *pcode > ni) {
        return false;
      }
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--)
        hcode[im++] = 0;
      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run of 2..5 zeroes encoded directly in the length field.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--)
        hcode[im++] = 0;
      im--;
    }
  }
  *pcode = const_cast<char *>(p);
  // Lengths -> canonical (code, length) pairs, in place.
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Reset every entry of a freshly allocated decoding table to the empty
// state (zero length, zero literal, no secondary long-code list).
//
void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  for (HufDec *entry = hdecod; entry != hdecod + HUF_DECSIZE; ++entry) {
    entry->len = 0;
    entry->lit = 0;
    entry->p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   unfrequent;
// - decoding tables are used by hufDecode();
//
// Returns false on a malformed table: a code value wider than its stated
// length, or two codes mapping to the same primary table slot.
//
bool hufBuildDecTable(const long long *hcode, // i : encoding table
                      int im,                 // i : min index in hcode
                      int iM,                 // i : max index in hcode
                      HufDec *hdecod)         //  o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);
    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      return false;
    }
    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry, keyed by the code's top
      // HUF_DECBITS bits.
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        return false;
      }
      // Grow the secondary symbol list by one (copy-and-replace; long
      // codes are rare, so the quadratic growth is acceptable).
      pl->lit++;
      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];
        for (int i = 0; i < pl->lit - 1; ++i)
          pl->p[i] = p[i];
        delete[] p;
      } else {
        pl->p = new int[1];
      }
      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries whose top l bits equal c.
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
      for (long long i = 1 << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          return false;
        }
        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
  // delete[] on a null pointer is a no-op, so no guard is needed.
  for (int i = 0; i < HUF_DECSIZE; i++) {
    delete[] hdecod[i].p;
    hdecod[i].p = 0;
  }
}
//
// ENCODING
//
// The whole encoder side is compiled out: tinyexr only decodes Huffman
// data at the moment (@todo).
//
#if 0 // @todo
// Emit one Huffman code (length bits from hufLength, value from hufCode).
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCount.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // runCount counts repeats beyond the first symbol, hence >= 0 emits
    // runCount + 1 copies.
    while (runCount-- >= 0)
      outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
int hufEncode // return: output size (in bits)
    (const long long *hcode,  // i : encoding table
     const unsigned short *in, // i : uncompressed input buffer
     const int ni,             // i : input buffer size (in bytes)
     int rlc,                  // i : rl code
     char *out)                //  o: compressed output buffer
{
  char *outStart = out;
  long long c = 0; // bits not yet written to out
  int lc = 0;      // number of valid bits in c (LSB)
  int s = in[0];
  int cs = 0;
  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }
    s = in[i];
  }
  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  if (lc)
    *out = (c << (8 - lc)) & 0xff;
  return (out - outStart) * 8 + lc;
}
#endif
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
//
// getChar(): shift the next input byte into the low end of the bit
// accumulator c; lc counts how many bits of c are valid.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
//
// getCode(): emit one decoded symbol po into out.  When po equals the
// run-length code rlc, the next 8 bits are a repeat count and the symbol
// previously written (out[-1]) is replicated that many times.  Expands to
// `return false` on output overrun, so it may only be used inside a
// function returning bool.
//
// NOTE(review): the run-length branch reads out[-1] without checking that
// at least one symbol has been emitted; a stream starting with rlc would
// read before the buffer — confirm upstream validation.
//
#define getCode(po, rlc, c, lc, in, out, oe) \
{ \
if (po == rlc) { \
if (lc < 8) \
getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) \
return false; \
\
unsigned short s = out[-1]; \
\
while (cs-- > 0) \
*out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
// Short codes (<= HUF_DECBITS bits) are resolved with one primary-table
// lookup; longer codes fall back to a linear search of the slot's
// secondary list.  Returns false on a malformed stream: unknown code,
// output overrun inside getCode(), or fewer than no symbols produced.
//
bool hufDecode(const long long *hcode, // i : encoding table
               const HufDec *hdecod,   // i : decoding table
               const char *in,         // i : compressed input buffer
               int ni,                 // i : input size (in bits)
               int rlc,                // i : run-length code
               int no,                 // i : expected output size (in bytes)
               unsigned short *out)    //  o: uncompressed output buffer
{
  long long c = 0; // bit accumulator
  int lc = 0;      // number of valid bits in c
  unsigned short *outb = out;
  unsigned short *oe = out + no;
  const char *ie = in + (ni + 7) / 8; // input byte size
  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);
    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        getCode(pl.lit, rlc, c, lc, in, out, oe);
      } else {
        if (!pl.p) {
          return false; // wrong code
        }
        //
        // Search long code
        //
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);
          while (lc < l && in < ie) // get more bits
            getChar(c, lc, in);
          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              getCode(pl.p[j], rlc, c, lc, in, out, oe);
              break;
            }
          }
        }
        if (j == pl.lit) {
          return false; // code not found
        }
      }
    }
  }
  //
  // Get remaining (short) codes: the last byte was padded when written,
  // so drop the pad bits and flush what is left of the accumulator.
  //
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;
  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
    if (pl.len) {
      lc -= pl.len;
      getCode(pl.lit, rlc, c, lc, in, out, oe);
    } else {
      return false; // wrong (long) code
    }
  }
  if (out - outb != no) {
    return false; // not enough data
  }
  return true;
}
#if 0 // @todo
// Histogram the input: freq[v] = number of occurrences of v in data[0..n).
void countFrequencies(long long freq[HUF_ENCSIZE],
                      const unsigned short data[/*n*/], int n) {
  for (int i = 0; i < HUF_ENCSIZE; ++i)
    freq[i] = 0;
  for (int i = 0; i < n; ++i)
    ++freq[data[i]];
}
// Store a 32-bit value into buf, little-endian.
void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;
  b[0] = i;
  b[1] = i >> 8;
  b[2] = i >> 16;
  b[3] = i >> 24;
}
#endif
//
// Read a 32-bit little-endian unsigned int from buf.
//
unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  // Cast each byte to unsigned int before shifting: `b[3] << 24` promotes
  // b[3] to (signed) int, and shifting a value >= 0x80 into the sign bit
  // is undefined behavior.  Unsigned shifts are well-defined, and the
  // masking the old code did becomes unnecessary.
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
#if 0 // @todo
// Compress nRaw symbols from raw[] into compressed[].  Writes a 20-byte
// header (im, iM, packed table length, bit count, reserved word), then the
// packed code table and the coded data.  Returns total bytes written.
int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) {
  if (nRaw == 0)
    return 0;
  long long freq[HUF_ENCSIZE];
  countFrequencies(freq, raw, nRaw);
  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq, &im, &iM);
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq, im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;
  int nBits = hufEncode(freq, raw, nRaw, iM, dataStart);
  int dataLength = (nBits + 7) / 8;
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0); // room for future extensions
  return dataStart + dataLength - compressed;
}
#endif
//
// Decompress a Huffman-coded block produced by hufCompress().
//
// Layout of `compressed`:
//   bytes  0- 3: im (min symbol index of the code table)
//   bytes  4- 7: iM (max symbol index of the code table)
//   bytes  8-11: packed code table length (unused here)
//   bytes 12-15: nBits (size of the coded data, in bits)
//   bytes 16-19: reserved
//   bytes 20-  : packed code table, followed by the coded data
//
// Returns false on malformed or truncated input.
//
bool hufUncompress(const char compressed[], int nCompressed,
                   unsigned short raw[], int nRaw) {
  if (nCompressed == 0) {
    // An empty compressed block is valid exactly when no output is
    // expected.  (The previous code returned false even for nRaw == 0.)
    return nRaw == 0;
  }
  // Need at least the 20-byte header.
  if (nCompressed < 20) {
    return false;
  }
  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);
  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE)
    return false;
  const char *ptr = compressed + 20;
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);
    hufClearDecTable(&hdec.at(0));
    // Propagate failures from every stage; previously the bool results of
    // hufUnpackEncTable/hufBuildDecTable/hufDecode were ignored, so a
    // corrupt stream silently decoded to garbage.
    bool ok = hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                                &freq.at(0));
    if (ok) {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        // No secondary entries allocated yet, so nothing to free here.
        return false;
      }
      ok = hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)) &&
           hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw);
    }
    // Always release the long-code lists allocated by hufBuildDecTable.
    hufFreeDecTable(&hdec.at(0));
    if (!ok) {
      return false;
    }
  }
  return true;
}
//
// Functions to compress the range of values in the pixel data
//
// USHORT_RANGE: number of distinct 16-bit symbol values.
// BITMAP_SIZE : bytes needed to hold one bit per symbol (USHORT_RANGE / 8).
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
#if 0 // @todo
// Build a bitmap with one bit per symbol value that occurs in data[], and
// report the indices of the first and last non-zero bitmap bytes.
void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                    unsigned char bitmap[BITMAP_SIZE],
                    unsigned short &minNonZero, unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i)
    bitmap[i] = 0;
  for (int i = 0; i < nData; ++i)
    bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
  bitmap[0] &= ~1; // zero is not explicitly stored in
                   // the bitmap; we assume that the
                   // data always contain zeroes
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i)
        minNonZero = i;
      if (maxNonZero < i)
        maxNonZero = i;
    }
  }
}
// Forward LUT: map each symbol value present in the bitmap to a dense
// index 0..k-1 (absent values map to 0).
unsigned short forwardLutFromBitmap(const unsigned char bitmap[BITMAP_SIZE],
                                    unsigned short lut[USHORT_RANGE]) {
  int k = 0;
  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }
  return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
#endif
//
// Reverse LUT: lut[k] = k-th symbol value whose bit is set in the bitmap
// (zero is always treated as present).  Unused tail entries are zeroed.
// Returns the largest index k for which lut[k] is meaningful, i.e. the
// number of ones in the bitmap minus 1.
//
unsigned short reverseLutFromBitmap(const unsigned char bitmap[BITMAP_SIZE],
                                    unsigned short lut[USHORT_RANGE]) {
  int count = 0;
  for (int sym = 0; sym < USHORT_RANGE; ++sym) {
    bool present = (sym == 0) || (bitmap[sym >> 3] & (1 << (sym & 7)));
    if (present) {
      lut[count++] = sym;
    }
  }
  int maxIndex = count - 1;
  for (int k = count; k < USHORT_RANGE; ++k) {
    lut[k] = 0;
  }
  return maxIndex;
}
//
// Map every element of data[] through the lookup table, in place.
//
void applyLut(const unsigned short lut[USHORT_RANGE],
              unsigned short data[/*nData*/], int nData) {
  for (int remaining = nData; remaining > 0; --remaining, ++data) {
    *data = lut[*data];
  }
}
#if 0 // @todo
// PIZ compression (unfinished): bitmap + forward LUT, wavelet encode,
// Huffman encode.  Kept for reference only.
bool CompressPiz(unsigned char *outPtr, unsigned int &outSize) {
  unsigned char bitmap[BITMAP_SIZE];
  unsigned short minNonZero;
  unsigned short maxNonZero;
  if (IsBigEndian()) {
    // @todo { PIZ compression on BigEndian architecture. }
    assert(0);
    return false;
  }
  std::vector<unsigned short> tmpBuffer;
  int nData = tmpBuffer.size();
  bitmapFromData(&tmpBuffer.at(0), nData, bitmap, minNonZero, maxNonZero);
  unsigned short lut[USHORT_RANGE];
  //unsigned short maxValue = forwardLutFromBitmap(bitmap, lut);
  applyLut(lut, &tmpBuffer.at(0), nData);
  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  if (minNonZero <= maxNonZero) {
    memcpy(buf, (char *)&bitmap[0] + minNonZero, maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }
#if 0 // @todo
  //
  // Apply wavelet encoding
  //
  for (int i = 0; i < channels; ++i)
  {
    ChannelData &cd = _channelData[i];
    for (int j = 0; j < cd.size; ++j)
    {
      wav2Encode (cd.start + j,
                  cd.nx, cd.size,
                  cd.ny, cd.nx * cd.size,
                  maxValue);
    }
  }
  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int)); buf += sizeof(int);
  int length = hufCompress (_tmpBuffer, tmpBufferEnd - _tmpBuffer, buf);
  memcpy(lengthPtr, tmpBuffer, length);
  //Xdr::write <CharPtrIO> (lengthPtr, length);
  outPtr = _outBuffer;
  return buf - _outBuffer + length;
#endif
  assert(0);
  return true;
}
#endif
//
// Decompress one PIZ-compressed scanline block:
//   1. read the bitmap of used symbol values and build a reverse LUT,
//   2. Huffman-decode the payload into a temporary buffer,
//   3. wavelet-decode each channel plane,
//   4. expand values through the LUT and interleave rows into outPtr.
//
// NOTE(review): outSize is never written by this function — presumably the
// caller already knows the decoded size (tmpBufSize); confirm.
// NOTE(review): the return value of hufUncompress() is ignored, so a
// corrupt stream decodes into garbage instead of failing — confirm intended.
//
bool DecompressPiz(unsigned char *outPtr, unsigned int &outSize,
                   const unsigned char *inPtr, size_t tmpBufSize,
                   const std::vector<ChannelInfo> &channelInfo, int dataWidth,
                   int numLines) {
  unsigned char bitmap[BITMAP_SIZE];
  unsigned short minNonZero;
  unsigned short maxNonZero;
  if (IsBigEndian()) {
    // @todo { PIZ compression on BigEndian architecture. }
    assert(0);
    return false;
  }
  memset(bitmap, 0, BITMAP_SIZE);
  const unsigned char *ptr = inPtr;
  // Header: two 16-bit values delimiting the non-zero byte range of the
  // symbol bitmap, followed by that byte range itself.
  minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;
  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }
  if (minNonZero <= maxNonZero) {
    memcpy((char *)&bitmap[0] + minNonZero, ptr, maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }
  unsigned short lut[USHORT_RANGE];
  memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap, lut);
  //
  // Huffman decoding
  //
  int length; // compressed payload size in bytes
  length = *(reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);
  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer.at(0),
                tmpBufSize);
  //
  // Wavelet decoding: carve tmpBuffer into contiguous per-channel planes.
  //
  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  for (size_t i = 0; i < channelInfo.size(); ++i) {
    const ChannelInfo &chan = channelInfo[i];
    int pixelSize = sizeof(int); // UINT and FLOAT
    if (chan.pixelType == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = dataWidth;
    channelData[i].ny = numLines;
    // size = number of 16-bit words per sample (1 for HALF, 2 otherwise).
    channelData[i].size = pixelSize / sizeof(short);
    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Expand the pixel data to their original range
  //
  applyLut(lut, &tmpBuffer.at(0), tmpBufSize);
  // @todo { Xdr }
  // Interleave: per scanline, emit each channel's row in channel order
  // (cd.end walks forward through each channel's plane).
  for (int y = 0; y < numLines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      int n = cd.nx * cd.size;
      memcpy(outPtr, cd.end, n * sizeof(unsigned short));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  return true;
}
//
// -----------------------------------------------------------------
//
} // namespace
//
// Load an EXR image from file into a newly malloc()ed interleaved RGBA
// float buffer (*out_rgba, 4 floats per pixel; alpha = 1.0 when the file
// has no "A" channel).  HALF channels are read as FLOAT.
// Returns 0 on success and a negative code on error (err, if non-NULL,
// receives a static message).  On success the caller owns *out_rgba.
//
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  if (out_rgba == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return -1;
  }
  EXRImage exrImage;
  InitEXRImage(&exrImage);
  {
    int ret = ParseMultiChannelEXRHeaderFromFile(&exrImage, filename, err);
    if (ret != 0) {
      return ret;
    }
  }
  // Read HALF channel as FLOAT.
  for (int i = 0; i < exrImage.num_channels; i++) {
    if (exrImage.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exrImage.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  {
    int ret = LoadMultiChannelEXRFromFile(&exrImage, filename, err);
    if (ret != 0) {
      return ret;
    }
  }
  // Locate the R, G, B and (optional) A channels.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exrImage.num_channels; c++) {
    if (strcmp(exrImage.channel_names[c], "R") == 0) {
      idxR = c;
    } else if (strcmp(exrImage.channel_names[c], "G") == 0) {
      idxG = c;
    } else if (strcmp(exrImage.channel_names[c], "B") == 0) {
      idxB = c;
    } else if (strcmp(exrImage.channel_names[c], "A") == 0) {
      idxA = c;
    }
  }
  if (idxR == -1) {
    if (err) {
      (*err) = "R channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  if (idxG == -1) {
    if (err) {
      (*err) = "G channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  if (idxB == -1) {
    if (err) {
      (*err) = "B channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  (*out_rgba) =
      (float *)malloc(4 * sizeof(float) * exrImage.width * exrImage.height);
  if ((*out_rgba) == NULL) {
    if (err) {
      (*err) = "Memory allocation failed.\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  for (int i = 0; i < exrImage.width * exrImage.height; i++) {
    (*out_rgba)[4 * i + 0] =
        reinterpret_cast<float **>(exrImage.images)[idxR][i];
    (*out_rgba)[4 * i + 1] =
        reinterpret_cast<float **>(exrImage.images)[idxG][i];
    (*out_rgba)[4 * i + 2] =
        reinterpret_cast<float **>(exrImage.images)[idxB][i];
    // Test `idxA != -1` (not `idxA > 0`): channel names are stored in
    // sorted order, so an "A" channel is usually at index 0 and the old
    // `> 0` test silently dropped it, forcing alpha to 1.0.
    if (idxA != -1) {
      (*out_rgba)[4 * i + 3] =
          reinterpret_cast<float **>(exrImage.images)[idxA][i];
    } else {
      (*out_rgba)[4 * i + 3] = 1.0;
    }
  }
  (*width) = exrImage.width;
  (*height) = exrImage.height;
  // @todo { free exrImage }
  return 0;
}
//
// Parse an EXR header from memory: validates the magic number and version,
// reads the standard attributes needed for the image dimensions, and
// collects any non-standard attributes into customAttributes.
//
// customAttributes/numCustomAttributes may be NULL when the caller only
// needs width/height.  Returns 0 on success, a negative code otherwise.
//
int ParseEXRHeaderFromMemory(EXRAttribute *customAttributes,
                             int *numCustomAttributes, int *width, int *height,
                             const unsigned char *memory) {
  if (memory == NULL || width == NULL || height == NULL) {
    // Invalid argument
    return -1;
  }
  const char *buf = reinterpret_cast<const char *>(memory);
  const char *marker = &buf[0];
  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      // Header mismatch.
      return -3;
    }
    marker += 4;
  }
  // Version, scanline.
  {
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) {
      // Unsupported version or scanline.
      return -4;
    }
    marker += 4;
  }
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int lineOrder = 0;                          // @fixme
  int displayWindow[4] = {-1, -1, -1, -1};    // @fixme
  float screenWindowCenter[2] = {0.0f, 0.0f}; // @fixme
  float screenWindowWidth = 1.0f;             // @fixme
  int numChannels = -1;
  float pixelAspectRatio = 1.0f; // @fixme
  std::vector<ChannelInfo> channels;
  std::vector<EXRAttribute> attribs;
  if (numCustomAttributes) {
    (*numCustomAttributes) = 0;
  }
  // Read attributes until the terminating '\0'.
  for (;;) {
    std::string attrName;
    std::string attrType;
    std::vector<unsigned char> data;
    const char *marker_next = ReadAttribute(attrName, attrType, data, marker);
    if (marker_next == NULL) {
      marker++; // skip '\0'
      break;
    }
    if (attrName.compare("compression") == 0) {
      // 0 : NO_COMPRESSION
      // 1 : RLE
      // 2 : ZIPS (Single scanline)
      // 3 : ZIP (16-line block)
      // 4 : PIZ (32-line block)
      // Guard data.empty() before data[0]: a zero-length attribute payload
      // would otherwise be an out-of-bounds read.
      if (data.empty() || data[0] > 4) {
        // Unsupported compression type.
        return -5;
      }
    } else if (attrName.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      ReadChannelInfo(channels, data);
      numChannels = channels.size();
      if (numChannels < 1) {
        // Invalid channels format.
        return -6;
      }
    } else if (attrName.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&dx));
        swap4(reinterpret_cast<unsigned int *>(&dy));
        swap4(reinterpret_cast<unsigned int *>(&dw));
        swap4(reinterpret_cast<unsigned int *>(&dh));
      }
    } else if (attrName.compare("displayWindow") == 0) {
      memcpy(&displayWindow[0], &data.at(0), sizeof(int));
      memcpy(&displayWindow[1], &data.at(4), sizeof(int));
      memcpy(&displayWindow[2], &data.at(8), sizeof(int));
      memcpy(&displayWindow[3], &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[0]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[1]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[2]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[3]));
      }
    } else if (attrName.compare("lineOrder") == 0) {
      // lineOrder is stored as a single byte in the file (0: increasing Y,
      // 1: decreasing Y, 2: random Y).  The previous code memcpy'd
      // sizeof(float) bytes into an int, reading past the end of the
      // 1-byte attribute payload.
      if (!data.empty()) {
        lineOrder = data[0];
      }
    } else if (attrName.compare("pixelAspectRatio") == 0) {
      memcpy(&pixelAspectRatio, &data.at(0), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&pixelAspectRatio));
      }
    } else if (attrName.compare("screenWindowCenter") == 0) {
      memcpy(&screenWindowCenter[0], &data.at(0), sizeof(float));
      memcpy(&screenWindowCenter[1], &data.at(4), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[0]));
        swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[1]));
      }
    } else if (attrName.compare("screenWindowWidth") == 0) {
      memcpy(&screenWindowWidth, &data.at(0), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&screenWindowWidth));
      }
    } else {
      // Custom attribute (up to TINYEXR_MAX_ATTRIBUTES).
      // The bound must be checked against attribs.size(): the previous code
      // tested *numCustomAttributes, which stays 0 throughout this loop, so
      // the limit was never enforced.  (A leftover debug printf("custom\n")
      // was also removed here.)
      if (numCustomAttributes &&
          ((int)attribs.size() < TINYEXR_MAX_ATTRIBUTES)) {
        EXRAttribute attrib;
        attrib.name = strdup(attrName.c_str());
        attrib.type = strdup(attrType.c_str());
        attrib.size = data.size();
        attrib.value = (unsigned char *)malloc(data.size());
        // Guard against empty payloads and malloc failure (malloc(0) may
        // legally return NULL).
        if (attrib.value != NULL && !data.empty()) {
          memcpy((char *)attrib.value, &data.at(0), data.size());
        }
        attribs.push_back(attrib);
      }
    }
    marker = marker_next;
  }
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(numChannels >= 1);
  int dataWidth = dw - dx + 1;
  int dataHeight = dh - dy + 1;
  (*width) = dataWidth;
  (*height) = dataHeight;
  if (numCustomAttributes) {
    assert(attribs.size() <= TINYEXR_MAX_ATTRIBUTES);
    (*numCustomAttributes) = attribs.size();
    // Assume the pointer to customAttributes has enough memory to store.
    for (int i = 0; i < (int)attribs.size(); i++) {
      customAttributes[i] = attribs[i];
    }
  }
  return 0;
}
//
// Load an EXR image from memory into a caller-allocated interleaved RGBA
// float buffer (4 floats per pixel; alpha = 1.0 when no "A" channel).
// Returns 0 on success, negative on error (err receives a static message).
//
int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return -1;
  }
  EXRImage exrImage;
  InitEXRImage(&exrImage);
  int ret = LoadMultiChannelEXRFromMemory(&exrImage, memory, err);
  if (ret != 0) {
    return ret;
  }
  // Locate the R, G, B and (optional) A channels.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exrImage.num_channels; c++) {
    if (strcmp(exrImage.channel_names[c], "R") == 0) {
      idxR = c;
    } else if (strcmp(exrImage.channel_names[c], "G") == 0) {
      idxG = c;
    } else if (strcmp(exrImage.channel_names[c], "B") == 0) {
      idxB = c;
    } else if (strcmp(exrImage.channel_names[c], "A") == 0) {
      idxA = c;
    }
  }
  if (idxR == -1) {
    if (err) {
      (*err) = "R channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  if (idxG == -1) {
    if (err) {
      (*err) = "G channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  if (idxB == -1) {
    if (err) {
      (*err) = "B channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  // Assume `out_rgba` have enough memory allocated.
  for (int i = 0; i < exrImage.width * exrImage.height; i++) {
    out_rgba[4 * i + 0] = reinterpret_cast<float **>(exrImage.images)[idxR][i];
    out_rgba[4 * i + 1] = reinterpret_cast<float **>(exrImage.images)[idxG][i];
    out_rgba[4 * i + 2] = reinterpret_cast<float **>(exrImage.images)[idxB][i];
    // Test `idxA != -1` (not `idxA > 0`): channel names are stored in
    // sorted order, so an "A" channel is usually at index 0 and the old
    // `> 0` test silently dropped it, forcing alpha to 1.0.
    if (idxA != -1) {
      out_rgba[4 * i + 3] =
          reinterpret_cast<float **>(exrImage.images)[idxA][i];
    } else {
      out_rgba[4 * i + 3] = 1.0;
    }
  }
  return 0;
}
//
// Read the whole file into memory and decode it via
// LoadMultiChannelEXRFromMemory().  Returns 0 on success, negative on
// error (err receives a static message).
//
int LoadMultiChannelEXRFromFile(EXRImage *exrImage, const char *filename,
                                const char **err) {
  if (exrImage == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }
  // Compute the file size.
  fseek(fp, 0, SEEK_END);
  long filesize = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  // ftell() returns -1 on failure; an empty file cannot be a valid EXR and
  // would make &buf.at(0) below throw.  (The old code only assert()ed.)
  if (filesize <= 0) {
    fclose(fp);
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }
  std::vector<unsigned char> buf(static_cast<size_t>(filesize)); // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, static_cast<size_t>(filesize), fp);
    fclose(fp); // close before decoding; the buffer holds everything we need
    if (ret != static_cast<size_t>(filesize)) {
      if (err) {
        (*err) = "Cannot read file.";
      }
      return -1;
    }
  }
  return LoadMultiChannelEXRFromMemory(exrImage, &buf.at(0), err);
}
int LoadMultiChannelEXRFromMemory(EXRImage *exrImage,
const unsigned char *memory,
const char **err) {
if (exrImage == NULL || memory == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return -1;
}
const char *buf = reinterpret_cast<const char *>(memory);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
if (err) {
(*err) = "Header mismatch.";
}
return -3;
}
marker += 4;
}
// Version, scanline.
{
// must be [2, 0, 0, 0]
if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) {
if (err) {
(*err) = "Unsupported version or scanline.";
}
return -4;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int numScanlineBlocks = 1; // 16 for ZIP compression.
int compressionType = -1;
int numChannels = -1;
unsigned char lineOrder = 0; // 0 -> increasing y; 1 -> decreasing
std::vector<ChannelInfo> channels;
// Read attributes
for (;;) {
std::string attrName;
std::string attrType;
std::vector<unsigned char> data;
const char *marker_next = ReadAttribute(attrName, attrType, data, marker);
if (marker_next == NULL) {
marker++; // skip '\0'
break;
}
if (attrName.compare("compression") == 0) {
// mwkm
// 0 : NO_COMPRESSION
// 1 : RLE
// 2 : ZIPS (Single scanline)
// 3 : ZIP (16-line block)
// 4 : PIZ (32-line block)
if (data[0] != 0 && data[0] != 2 && data[0] != 3 && data[0] != 4) {
if (err) {
(*err) = "Unsupported compression type.";
}
return -5;
}
compressionType = data[0];
if (compressionType == 3) { // ZIP
numScanlineBlocks = 16;
} else if (compressionType == 4) { // PIZ
numScanlineBlocks = 32;
}
} else if (attrName.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
ReadChannelInfo(channels, data);
numChannels = channels.size();
if (numChannels < 1) {
if (err) {
(*err) = "Invalid channels format.";
}
return -6;
}
} else if (attrName.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
if (IsBigEndian()) {
swap4(reinterpret_cast<unsigned int *>(&dx));
swap4(reinterpret_cast<unsigned int *>(&dy));
swap4(reinterpret_cast<unsigned int *>(&dw));
swap4(reinterpret_cast<unsigned int *>(&dh));
}
} else if (attrName.compare("displayWindow") == 0) {
int x, y, w, h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
if (IsBigEndian()) {
swap4(reinterpret_cast<unsigned int *>(&x));
swap4(reinterpret_cast<unsigned int *>(&y));
swap4(reinterpret_cast<unsigned int *>(&w));
swap4(reinterpret_cast<unsigned int *>(&h));
}
} else if (attrName.compare("lineOrder") == 0) {
memcpy(&lineOrder, &data.at(0), sizeof(lineOrder));
}
marker = marker_next;
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(numChannels >= 1);
int dataWidth = dw - dx + 1;
int dataHeight = dh - dy + 1;
// Read offset tables.
int numBlocks = dataHeight / numScanlineBlocks;
if (numBlocks * numScanlineBlocks < dataHeight) {
numBlocks++;
}
std::vector<long long> offsets(numBlocks);
for (int y = 0; y < numBlocks; y++) {
long long offset;
memcpy(&offset, marker, sizeof(long long));
if (IsBigEndian()) {
swap8(reinterpret_cast<unsigned long long *>(&offset));
}
marker += sizeof(long long); // = 8
offsets[y] = offset;
}
// mwkm
// Supported : 0, 2(ZIPS), 3(ZIP), 4(PIZ)
if (compressionType != 0 && compressionType != 2 && compressionType != 3 &&
compressionType != 4) {
if (err) {
(*err) = "Unsupported format.";
}
return -10;
}
exrImage->images = reinterpret_cast<unsigned char **>(
(float **)malloc(sizeof(float *) * numChannels));
std::vector<size_t> channelOffsetList(numChannels);
int pixelDataSize = 0;
size_t channelOffset = 0;
for (int c = 0; c < numChannels; c++) {
channelOffsetList[c] = channelOffset;
if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) {
pixelDataSize += sizeof(unsigned short);
channelOffset += sizeof(unsigned short);
// Alloc internal image for half type.
if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
exrImage->images[c] =
reinterpret_cast<unsigned char *>((unsigned short *)malloc(
sizeof(unsigned short) * dataWidth * dataHeight));
} else if (exrImage->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
exrImage->images[c] = reinterpret_cast<unsigned char *>(
(float *)malloc(sizeof(float) * dataWidth * dataHeight));
} else {
assert(0);
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) {
pixelDataSize += sizeof(float);
channelOffset += sizeof(float);
exrImage->images[c] = reinterpret_cast<unsigned char *>(
(float *)malloc(sizeof(float) * dataWidth * dataHeight));
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) {
pixelDataSize += sizeof(unsigned int);
channelOffset += sizeof(unsigned int);
exrImage->images[c] = reinterpret_cast<unsigned char *>((
unsigned int *)malloc(sizeof(unsigned int) * dataWidth * dataHeight));
} else {
assert(0);
}
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < numBlocks; y++) {
const unsigned char *dataPtr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
int lineNo;
memcpy(&lineNo, dataPtr, sizeof(int));
int dataLen;
memcpy(&dataLen, dataPtr + 4, sizeof(int));
if (IsBigEndian()) {
swap4(reinterpret_cast<unsigned int *>(&lineNo));
swap4(reinterpret_cast<unsigned int *>(&dataLen));
}
int endLineNo = (std::min)(lineNo + numScanlineBlocks, dataHeight);
int numLines = endLineNo - lineNo;
if (compressionType == 4) { // PIZ
// Allocate original data size.
std::vector<unsigned char> outBuf(dataWidth * numLines * pixelDataSize);
unsigned int dstLen;
size_t tmpBufLen = dataWidth * numLines * pixelDataSize;
DecompressPiz(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen,
dataPtr + 8, tmpBufLen, channels, dataWidth, numLines);
bool isBigEndian = IsBigEndian();
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (int c = 0; c < numChannels; c++) {
if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) {
for (int v = 0; v < numLines; v++) {
const unsigned short *linePtr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
FP16 hf;
hf.u = linePtr[u];
if (isBigEndian) {
swap2(reinterpret_cast<unsigned short *>(&hf.u));
}
if (exrImage->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) {
assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (int v = 0; v < numLines; v++) {
const unsigned int *linePtr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
unsigned int val = linePtr[u];
if (isBigEndian) {
swap4(&val);
}
unsigned int *image =
reinterpret_cast<unsigned int **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = val;
}
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) {
assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (int v = 0; v < numLines; v++) {
const float *linePtr = reinterpret_cast<float *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
float val = linePtr[u];
if (isBigEndian) {
swap4(reinterpret_cast<unsigned int *>(&val));
}
float *image = reinterpret_cast<float **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = val;
}
}
} else {
assert(0);
}
}
// mwkm, ZIPS or ZIP both good to go
} else if (compressionType == 2 || compressionType == 3) { // ZIP
// Allocate original data size.
std::vector<unsigned char> outBuf(dataWidth * numLines * pixelDataSize);
unsigned long dstLen = outBuf.size();
DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen,
dataPtr + 8, dataLen);
bool isBigEndian = IsBigEndian();
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (int c = 0; c < numChannels; c++) {
if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) {
for (int v = 0; v < numLines; v++) {
const unsigned short *linePtr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
FP16 hf;
hf.u = linePtr[u];
if (isBigEndian) {
swap2(reinterpret_cast<unsigned short *>(&hf.u));
}
if (exrImage->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) {
assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (int v = 0; v < numLines; v++) {
const unsigned int *linePtr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
unsigned int val = linePtr[u];
if (isBigEndian) {
swap4(&val);
}
unsigned int *image =
reinterpret_cast<unsigned int **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = val;
}
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) {
assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (int v = 0; v < numLines; v++) {
const float *linePtr = reinterpret_cast<float *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
float val = linePtr[u];
if (isBigEndian) {
swap4(reinterpret_cast<unsigned int *>(&val));
}
float *image = reinterpret_cast<float **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = val;
}
}
} else {
assert(0);
}
}
} else if (compressionType == 0) { // No compression
bool isBigEndian = IsBigEndian();
for (int c = 0; c < numChannels; c++) {
if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *linePtr =
reinterpret_cast<const unsigned short *>(
dataPtr + 8 + c * dataWidth * sizeof(unsigned short));
if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(exrImage->images[c]);
if (lineOrder == 0) {
outLine += y * dataWidth;
} else {
outLine += (dataHeight - 1 - y) * dataWidth;
}
for (int u = 0; u < dataWidth; u++) {
FP16 hf;
hf.u = linePtr[u];
if (isBigEndian) {
swap2(reinterpret_cast<unsigned short *>(&hf.u));
}
outLine[u] = hf.u;
}
} else if (exrImage->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(exrImage->images[c]);
if (lineOrder == 0) {
outLine += y * dataWidth;
} else {
outLine += (dataHeight - 1 - y) * dataWidth;
}
for (int u = 0; u < dataWidth; u++) {
FP16 hf;
hf.u = linePtr[u];
if (isBigEndian) {
swap2(reinterpret_cast<unsigned short *>(&hf.u));
}
FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) {
const float *linePtr = reinterpret_cast<const float *>(
dataPtr + 8 + c * dataWidth * sizeof(float));
float *outLine = reinterpret_cast<float *>(exrImage->images[c]);
if (lineOrder == 0) {
outLine += y * dataWidth;
} else {
outLine += (dataHeight - 1 - y) * dataWidth;
}
for (int u = 0; u < dataWidth; u++) {
float val = linePtr[u];
if (isBigEndian) {
swap4(reinterpret_cast<unsigned int *>(&val));
}
outLine[u] = val;
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *linePtr = reinterpret_cast<const unsigned int *>(
dataPtr + 8 + c * dataWidth * sizeof(unsigned int));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(exrImage->images[c]);
if (lineOrder == 0) {
outLine += y * dataWidth;
} else {
outLine += (dataHeight - 1 - y) * dataWidth;
}
for (int u = 0; u < dataWidth; u++) {
unsigned int val = linePtr[u];
if (isBigEndian) {
swap4(reinterpret_cast<unsigned int *>(&val));
}
outLine[u] = val;
}
}
}
}
} // omp parallel
{
exrImage->channel_names =
(const char **)malloc(sizeof(const char *) * numChannels);
for (int c = 0; c < numChannels; c++) {
#ifdef _WIN32
exrImage->channel_names[c] = _strdup(channels[c].name.c_str());
#else
exrImage->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
exrImage->num_channels = numChannels;
exrImage->width = dataWidth;
exrImage->height = dataHeight;
// Fill with requested_pixel_types.
exrImage->pixel_types = (int *)malloc(sizeof(int *) * numChannels);
for (int c = 0; c < numChannels; c++) {
exrImage->pixel_types[c] = exrImage->requested_pixel_types[c];
}
}
return 0; // OK
}
// @deprecated Superseded by SaveMultiChannelEXRToFile(); compiled out with
// `#if 0` and kept for reference only.
#if 0
// Write `in_rgba` (width x height interleaved RGBA, float) to `filename` as a
// version-2 single-part scanline EXR with ZIP compression and HALF pixel
// storage. Returns 0 on success, -1 on invalid argument or unwritable file.
int SaveEXR(const float *in_rgba, int width, int height, const char *filename,
            const char **err) {
  if (in_rgba == NULL || filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot write a file.";
    }
    return -1;
  }

  // Header
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};  // EXR magic number.
    size_t n = fwrite(header, 1, 4, fp);
    assert(n == 4);
  }

  // Version, scanline.
  {
    const char marker[] = {2, 0, 0, 0};  // version 2, single-part scanline.
    size_t n = fwrite(marker, 1, 4, fp);
    assert(n == 4);
  }

  int numScanlineBlocks = 16;  // 16 for ZIP compression.

  // Write attributes.
  {
    // Hard-coded chlist payload: channels "A","B","G","R", each 18 bytes
    // (name + pixelType=HALF(1) + pLinear/reserved + xSampling/ySampling=1).
    unsigned char data[] = {
        'A', 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'B',
        0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'G', 0,
        1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'R', 0, 1,
        0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0};  // last 0 =
                                                           // terminator.
    WriteAttribute(fp, "channels", "chlist", data, 18 * 4 + 1);  // +1 = null
  }

  {
    int compressionType = 3;  // ZIP compression
    WriteAttribute(fp, "compression", "compression",
                   reinterpret_cast<const unsigned char *>(&compressionType),
                   1);
  }

  {
    // dataWindow == displayWindow == [0,0] .. [width-1, height-1].
    int data[4] = {0, 0, width - 1, height - 1};
    WriteAttribute(fp, "dataWindow", "box2i",
                   reinterpret_cast<const unsigned char *>(data),
                   sizeof(int) * 4);
    WriteAttribute(fp, "displayWindow", "box2i",
                   reinterpret_cast<const unsigned char *>(data),
                   sizeof(int) * 4);
  }

  {
    unsigned char lineOrder = 0;  // increasingY
    WriteAttribute(fp, "lineOrder", "lineOrder", &lineOrder, 1);
  }

  {
    float aspectRatio = 1.0f;
    WriteAttribute(fp, "pixelAspectRatio", "float",
                   reinterpret_cast<const unsigned char *>(&aspectRatio),
                   sizeof(float));
  }

  {
    float center[2] = {0.0f, 0.0f};
    WriteAttribute(fp, "screenWindowCenter", "v2f",
                   reinterpret_cast<const unsigned char *>(center),
                   2 * sizeof(float));
  }

  {
    float w = (float)width;
    WriteAttribute(fp, "screenWindowWidth", "float",
                   reinterpret_cast<const unsigned char *>(&w), sizeof(float));
  }

  {  // end of header
    unsigned char e = 0;
    fwrite(&e, 1, 1, fp);
  }

  int numBlocks = height / numScanlineBlocks;
  if (numBlocks * numScanlineBlocks < height) {
    numBlocks++;  // partial trailing block.
  }

  std::vector<long long> offsets(numBlocks);

  size_t headerSize = ftell(fp);  // sizeof(header)
  long long offset =
      headerSize +
      numBlocks * sizeof(long long);  // sizeof(header) + sizeof(offsetTable)

  std::vector<unsigned char> data;

  for (int i = 0; i < numBlocks; i++) {
    int startY = numScanlineBlocks * i;
    int endY = (std::min)(numScanlineBlocks * (i + 1), height);
    int h = endY - startY;

    std::vector<unsigned short> buf(4 * width * h);

    // Convert interleaved float RGBA to per-scanline planar half, channel
    // order A, B, G, R (matching the alphabetical chlist above).
    for (int y = 0; y < h; y++) {
      for (int x = 0; x < width; x++) {
        FP32 r, g, b, a;
        r.f = in_rgba[4 * ((y + startY) * width + x) + 0];
        g.f = in_rgba[4 * ((y + startY) * width + x) + 1];
        b.f = in_rgba[4 * ((y + startY) * width + x) + 2];
        a.f = in_rgba[4 * ((y + startY) * width + x) + 3];

        FP16 hr, hg, hb, ha;
        hr = float_to_half_full(r);
        hg = float_to_half_full(g);
        hb = float_to_half_full(b);
        ha = float_to_half_full(a);

        // Assume increasing Y
        buf[4 * y * width + 3 * width + x] = hr.u;
        buf[4 * y * width + 2 * width + x] = hg.u;
        buf[4 * y * width + 1 * width + x] = hb.u;
        buf[4 * y * width + 0 * width + x] = ha.u;
      }
    }

    // NOTE(review): `bound` is unused; the same expression is re-evaluated
    // for the block size on the next line.
    int bound = miniz::mz_compressBound(buf.size() * sizeof(unsigned short));

    std::vector<unsigned char> block(
        miniz::mz_compressBound(buf.size() * sizeof(unsigned short)));
    unsigned long long outSize = block.size();

    CompressZip(&block.at(0), outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size() * sizeof(unsigned short));

    // Scanline block layout:
    // 4 byte: scan line
    // 4 byte: data size
    // ~    : pixel data(compressed)
    std::vector<unsigned char> header(8);
    unsigned int dataLen = outSize;  // truncate
    memcpy(&header.at(0), &startY, sizeof(int));
    memcpy(&header.at(4), &dataLen, sizeof(unsigned int));

    data.insert(data.end(), header.begin(), header.end());
    data.insert(data.end(), block.begin(), block.begin() + dataLen);

    offsets[i] = offset;
    offset += dataLen + 8;  // 8 = sizeof(blockHeader)
  }

  // Offset table, then all scanline blocks.
  fwrite(&offsets.at(0), 1, sizeof(unsigned long long) * numBlocks, fp);

  fwrite(&data.at(0), 1, data.size(), fp);

  fclose(fp);

  return 0;  // OK
}
#endif
// Serialize `exrImage` to an EXR byte stream allocated with malloc() and
// returned through `*memory_out` (caller owns and must free() it).
//
// Output is a version-2 single-part scanline file with ZIP compression.
// Each channel is written using exrImage->requested_pixel_types[c];
// HALF<->FLOAT conversions from the in-memory pixel_types[c] are performed
// here. UINT channels are written as-is.
//
// Returns the byte size of the stream on success. On invalid arguments it
// returns -1, which wraps to SIZE_MAX because the return type is size_t;
// callers must treat that value (and 0) as failure.
size_t SaveMultiChannelEXRToMemory(const EXRImage *exrImage,
                                   unsigned char **memory_out,
                                   const char **err) {
  if (exrImage == NULL || memory_out == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;  // (size_t)-1 == SIZE_MAX; see doc comment above.
  }

  std::vector<unsigned char> memory;

  // Header
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};  // EXR magic number.
    memory.insert(memory.end(), header, header + 4);
  }

  // Version, scanline.
  {
    const char marker[] = {2, 0, 0, 0};  // version 2, single-part scanline.
    memory.insert(memory.end(), marker, marker + 4);
  }

  int numScanlineBlocks =
      16;  // 1 for no compress & ZIPS, 16 for ZIP compression.

  // Write attributes.
  {
    std::vector<unsigned char> data;
    std::vector<ChannelInfo> channels;
    for (int c = 0; c < exrImage->num_channels; c++) {
      ChannelInfo info;
      info.pLinear = 0;
      info.pixelType = exrImage->requested_pixel_types[c];
      info.xSampling = 1;
      info.ySampling = 1;
      info.name = std::string(exrImage->channel_names[c]);
      channels.push_back(info);
    }

    WriteChannelInfo(data, channels);

    // NOTE(review): assumes WriteChannelInfo() appends the chlist
    // terminating null byte itself — verify against its definition.
    WriteAttributeToMemory(memory, "channels", "chlist", &data.at(0),
                           data.size());
  }

  {
    int compressionType = 3;  // ZIP compression
    if (IsBigEndian()) {
      // Attribute payloads are little-endian on disk.
      swap4(reinterpret_cast<unsigned int *>(&compressionType));
    }
    WriteAttributeToMemory(
        memory, "compression", "compression",
        reinterpret_cast<const unsigned char *>(&compressionType), 1);
  }

  {
    // dataWindow == displayWindow == [0,0] .. [width-1, height-1].
    int data[4] = {0, 0, exrImage->width - 1, exrImage->height - 1};
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&data[0]));
      swap4(reinterpret_cast<unsigned int *>(&data[1]));
      swap4(reinterpret_cast<unsigned int *>(&data[2]));
      swap4(reinterpret_cast<unsigned int *>(&data[3]));
    }
    WriteAttributeToMemory(memory, "dataWindow", "box2i",
                           reinterpret_cast<const unsigned char *>(data),
                           sizeof(int) * 4);
    WriteAttributeToMemory(memory, "displayWindow", "box2i",
                           reinterpret_cast<const unsigned char *>(data),
                           sizeof(int) * 4);
  }

  {
    unsigned char lineOrder = 0;  // increasingY
    WriteAttributeToMemory(memory, "lineOrder", "lineOrder", &lineOrder, 1);
  }

  {
    float aspectRatio = 1.0f;
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
    }
    WriteAttributeToMemory(
        memory, "pixelAspectRatio", "float",
        reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
  }

  {
    float center[2] = {0.0f, 0.0f};
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&center[0]));
      swap4(reinterpret_cast<unsigned int *>(&center[1]));
    }
    WriteAttributeToMemory(memory, "screenWindowCenter", "v2f",
                           reinterpret_cast<const unsigned char *>(center),
                           2 * sizeof(float));
  }

  {
    float w = (float)exrImage->width;
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&w));
    }
    WriteAttributeToMemory(memory, "screenWindowWidth", "float",
                           reinterpret_cast<const unsigned char *>(&w),
                           sizeof(float));
  }

  // Custom attributes (written verbatim from the caller-supplied payload).
  if (exrImage->num_custom_attributes > 0) {
    // @todo { endian }
    for (int i = 0; i < exrImage->num_custom_attributes; i++) {
      WriteAttributeToMemory(
          memory, exrImage->custom_attributes[i].name,
          exrImage->custom_attributes[i].type,
          reinterpret_cast<const unsigned char *>(
              &exrImage->custom_attributes[i].value),
          exrImage->custom_attributes[i].size);
    }
  }

  {  // end of header
    unsigned char e = 0;
    memory.push_back(e);
  }

  int numBlocks = exrImage->height / numScanlineBlocks;
  if (numBlocks * numScanlineBlocks < exrImage->height) {
    numBlocks++;  // partial trailing block.
  }
  std::vector<long long> offsets(numBlocks);

  size_t headerSize = memory.size();
  long long offset =
      headerSize +
      numBlocks * sizeof(long long);  // sizeof(header) + sizeof(offsetTable)

  std::vector<unsigned char> data;

  bool isBigEndian = IsBigEndian();

  // Per-block output buffers, so blocks can be compressed in parallel and
  // concatenated in deterministic order afterwards.
  std::vector<std::vector<unsigned char> > dataList(numBlocks);

  // Byte offset of each channel's plane within one interleaved scanline, and
  // the total bytes-per-pixel across all channels.
  std::vector<size_t> channelOffsetList(exrImage->num_channels);
  int pixelDataSize = 0;
  size_t channelOffset = 0;
  for (int c = 0; c < exrImage->num_channels; c++) {
    channelOffsetList[c] = channelOffset;
    if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
      pixelDataSize += sizeof(unsigned short);
      channelOffset += sizeof(unsigned short);
    } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
      pixelDataSize += sizeof(float);
      channelOffset += sizeof(float);
    } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
      pixelDataSize += sizeof(unsigned int);
      channelOffset += sizeof(unsigned int);
    } else {
      assert(0);
    }
  }

#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < numBlocks; i++) {
    int startY = numScanlineBlocks * i;
    int endY = (std::min)(numScanlineBlocks * (i + 1), exrImage->height);
    int h = endY - startY;

    // Planar per-scanline pixel data for this block, converted from
    // pixel_types[c] to requested_pixel_types[c].
    std::vector<unsigned char> buf(exrImage->width * h * pixelDataSize);

    for (int c = 0; c < exrImage->num_channels; c++) {
      if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
          // HALF in memory -> FLOAT on disk.
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exrImage->width; x++) {
              FP16 h16;
              h16.u = reinterpret_cast<unsigned short **>(
                  exrImage->images)[c][(y + startY) * exrImage->width + x];

              FP32 f32 = half_to_float(h16);

              if (isBigEndian) {
                swap4(reinterpret_cast<unsigned int *>(&f32.f));
              }

              // Assume increasing Y
              float *linePtr = reinterpret_cast<float *>(
                  &buf.at(pixelDataSize * y * exrImage->width +
                          channelOffsetList[c] * exrImage->width));
              linePtr[x] = f32.f;
            }
          }
        } else if (exrImage->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_HALF) {
          // HALF -> HALF, copy with endian fix.
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exrImage->width; x++) {
              unsigned short val = reinterpret_cast<unsigned short **>(
                  exrImage->images)[c][(y + startY) * exrImage->width + x];

              if (isBigEndian) {
                swap2(&val);
              }

              // Assume increasing Y
              unsigned short *linePtr = reinterpret_cast<unsigned short *>(
                  &buf.at(pixelDataSize * y * exrImage->width +
                          channelOffsetList[c] * exrImage->width));
              linePtr[x] = val;
            }
          }
        } else {
          assert(0);  // HALF -> UINT is not supported.
        }
      } else if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
          // FLOAT in memory -> HALF on disk.
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exrImage->width; x++) {
              FP32 f32;
              f32.f = reinterpret_cast<float **>(
                  exrImage->images)[c][(y + startY) * exrImage->width + x];

              FP16 h16;
              h16 = float_to_half_full(f32);

              if (isBigEndian) {
                swap2(reinterpret_cast<unsigned short *>(&h16.u));
              }

              // Assume increasing Y
              unsigned short *linePtr = reinterpret_cast<unsigned short *>(
                  &buf.at(pixelDataSize * y * exrImage->width +
                          channelOffsetList[c] * exrImage->width));
              linePtr[x] = h16.u;
            }
          }
        } else if (exrImage->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_FLOAT) {
          // FLOAT -> FLOAT, copy with endian fix.
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exrImage->width; x++) {
              float val = reinterpret_cast<float **>(
                  exrImage->images)[c][(y + startY) * exrImage->width + x];

              if (isBigEndian) {
                swap4(reinterpret_cast<unsigned int *>(&val));
              }

              // Assume increasing Y
              float *linePtr = reinterpret_cast<float *>(
                  &buf.at(pixelDataSize * y * exrImage->width +
                          channelOffsetList[c] * exrImage->width));
              linePtr[x] = val;
            }
          }
        } else {
          assert(0);  // FLOAT -> UINT is not supported.
        }
      } else if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
        // UINT -> UINT, copy with endian fix.
        for (int y = 0; y < h; y++) {
          for (int x = 0; x < exrImage->width; x++) {
            unsigned int val = reinterpret_cast<unsigned int **>(
                exrImage->images)[c][(y + startY) * exrImage->width + x];

            if (isBigEndian) {
              swap4(&val);
            }

            // Assume increasing Y
            unsigned int *linePtr = reinterpret_cast<unsigned int *>(
                &buf.at(pixelDataSize * y * exrImage->width +
                        channelOffsetList[c] * exrImage->width));
            linePtr[x] = val;
          }
        }
      }
    }

    std::vector<unsigned char> block(miniz::mz_compressBound(buf.size()));
    unsigned long long outSize = block.size();

    CompressZip(&block.at(0), outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size());

    // Scanline block layout:
    // 4 byte: scan line
    // 4 byte: data size
    // ~    : pixel data(compressed)
    std::vector<unsigned char> header(8);
    unsigned int dataLen = outSize;  // truncate
    memcpy(&header.at(0), &startY, sizeof(int));
    memcpy(&header.at(4), &dataLen, sizeof(unsigned int));
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
    }

    dataList[i].insert(dataList[i].end(), header.begin(), header.end());
    dataList[i].insert(dataList[i].end(), block.begin(),
                       block.begin() + dataLen);
  }  // omp parallel

  // Concatenate the blocks and build the offset table sequentially, since
  // each offset depends on the sizes of all preceding blocks.
  for (int i = 0; i < numBlocks; i++) {
    data.insert(data.end(), dataList[i].begin(), dataList[i].end());

    offsets[i] = offset;
    if (IsBigEndian()) {
      swap8(reinterpret_cast<unsigned long long *>(&offsets[i]));
    }
    offset += dataList[i].size();
  }

  {
    memory.insert(memory.end(),
                  reinterpret_cast<unsigned char *>(&offsets.at(0)),
                  reinterpret_cast<unsigned char *>(&offsets.at(0)) +
                      sizeof(unsigned long long) * numBlocks);
  }

  { memory.insert(memory.end(), data.begin(), data.end()); }

  assert(memory.size() > 0);

  (*memory_out) = (unsigned char *)malloc(memory.size());
  memcpy((*memory_out), &memory.at(0), memory.size());

  return memory.size();  // OK
}
// Encode `exrImage` with SaveMultiChannelEXRToMemory() and write the result
// to `filename`. Returns 0 on success, -1 on failure (error string in *err
// when provided).
int SaveMultiChannelEXRToFile(const EXRImage *exrImage, const char *filename,
                              const char **err) {
  if (exrImage == NULL || filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  // Encode before touching the filesystem so a failed encode does not leave
  // an empty/truncated file behind.
  unsigned char *mem = NULL;
  size_t mem_size = SaveMultiChannelEXRToMemory(exrImage, &mem, err);
  // SaveMultiChannelEXRToMemory() signals failure with (size_t)-1 (its
  // `return -1;` wraps, since the return type is size_t) and leaves `mem`
  // NULL; the previous code silently reported success in that case.
  if (mem == NULL || mem_size == 0 || mem_size == (size_t)(-1)) {
    free(mem);  // free(NULL) is a no-op.
    if (err) {
      (*err) = "Failed to encode EXR data.";
    }
    return -1;
  }

  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    free(mem);
    if (err) {
      (*err) = "Cannot write a file.";
    }
    return -1;
  }

  size_t written = fwrite(mem, 1, mem_size, fp);
  free(mem);

  // fclose() flushes buffered output; a short write or close error means the
  // file on disk is incomplete.
  if (fclose(fp) != 0 || written != mem_size) {
    if (err) {
      (*err) = "Cannot write a file.";
    }
    return -1;
  }

  return 0;  // OK
}
// Load a deep scanline EXR (version 2.0 with the deep bit set) from
// `filename` into `deepImage`. Supported compression: none(0), ZIPS(2),
// ZIP(3). All channel samples are converted to float in
// deepImage->image[channel][scanline][sample]; per-pixel cumulative sample
// counts go to deepImage->offset_table[scanline][x].
// Returns 0 on success, a negative error code otherwise (message in *err
// when provided). Caller frees the malloc()ed members of `deepImage`.
int LoadDeepEXR(DeepImage *deepImage, const char *filename, const char **err) {
  if (deepImage == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    if (err) {
      (*err) = "File size is zero.";
    }
    return -1;
  }

  // Slurp the whole file; parsing below walks `marker` through this buffer.
  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];    // start of file; block offsets are relative to it.
  const char *marker = &buf[0];  // parse cursor.

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};  // EXR magic number.
    if (memcmp(marker, header, 4) != 0) {
      if (err) {
        (*err) = "Header mismatch.";
      }
      return -3;
    }

    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 8, 0, 0]
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      if (err) {
        (*err) = "Unsupported version or scanline.";
      }
      return -4;
    }

    marker += 4;
  }

  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int numScanlineBlocks = 1;  // 16 for ZIP compression.
  int compressionType = -1;
  int numChannels = -1;
  std::vector<ChannelInfo> channels;

  // Read attributes
  for (;;) {
    std::string attrName;
    std::string attrType;
    std::vector<unsigned char> data;
    const char *marker_next = ReadAttribute(attrName, attrType, data, marker);
    if (marker_next == NULL) {
      marker++;  // skip '\0'
      break;     // empty attribute name terminates the header.
    }

    if (attrName.compare("compression") == 0) {
      // must be 0:No compression, 1: RLE, 2: ZIPs or 3: ZIP
      if (data[0] > 3) {
        if (err) {
          (*err) = "Unsupported compression type.";
        }
        return -5;
      }

      compressionType = data[0];

      if (compressionType == 3) {  // ZIP
        numScanlineBlocks = 16;
      }

    } else if (attrName.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      ReadChannelInfo(channels, data);

      numChannels = channels.size();
      if (numChannels < 1) {
        if (err) {
          (*err) = "Invalid channels format.";
        }
        return -6;
      }

    } else if (attrName.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        // Attribute payloads are little-endian on disk.
        swap4(reinterpret_cast<unsigned int *>(&dx));
        swap4(reinterpret_cast<unsigned int *>(&dy));
        swap4(reinterpret_cast<unsigned int *>(&dw));
        swap4(reinterpret_cast<unsigned int *>(&dh));
      }

    } else if (attrName.compare("displayWindow") == 0) {
      // Parsed for validation of the payload only; values are not used.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&x));
        swap4(reinterpret_cast<unsigned int *>(&y));
        swap4(reinterpret_cast<unsigned int *>(&w));
        swap4(reinterpret_cast<unsigned int *>(&h));
      }
    }

    marker = marker_next;
  }

  // NOTE(review): required-attribute validation is assert-only; with NDEBUG
  // a malformed file falls through here with negative dimensions.
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(numChannels >= 1);

  int dataWidth = dw - dx + 1;
  int dataHeight = dh - dy + 1;

  // NOTE(review): `image` is never read or written below — apparently a
  // leftover from the non-deep loader.
  std::vector<float> image(dataWidth * dataHeight * 4);  // 4 = RGBA

  // Read offset tables.
  int numBlocks = dataHeight / numScanlineBlocks;
  if (numBlocks * numScanlineBlocks < dataHeight) {
    numBlocks++;  // partial trailing block.
  }

  std::vector<long long> offsets(numBlocks);

  for (int y = 0; y < numBlocks; y++) {
    long long offset;
    memcpy(&offset, marker, sizeof(long long));
    if (IsBigEndian()) {
      swap8(reinterpret_cast<unsigned long long *>(&offset));
    }
    marker += sizeof(long long);  // = 8
    offsets[y] = offset;
  }

  if (compressionType != 0 && compressionType != 2 && compressionType != 3) {
    if (err) {
      (*err) = "Unsupported format.";
    }
    return -10;
  }

  deepImage->image = (float ***)malloc(sizeof(float **) * numChannels);
  for (int c = 0; c < numChannels; c++) {
    deepImage->image[c] = (float **)malloc(sizeof(float *) * dataHeight);
    // NOTE(review): empty loop — the per-scanline sample arrays are
    // allocated later, inside the block-decoding loop below.
    for (int y = 0; y < dataHeight; y++) {
    }
  }

  deepImage->offset_table = (int **)malloc(sizeof(int *) * dataHeight);
  for (int y = 0; y < dataHeight; y++) {
    deepImage->offset_table[y] = (int *)malloc(sizeof(int) * dataWidth);
  }

  for (int y = 0; y < numBlocks; y++) {
    const unsigned char *dataPtr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);

    // Deep scanline block layout:
    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int lineNo;
    long long packedOffsetTableSize;
    long long packedSampleDataSize;
    long long unpackedSampleDataSize;
    memcpy(&lineNo, dataPtr, sizeof(int));
    memcpy(&packedOffsetTableSize, dataPtr + 4, sizeof(long long));
    memcpy(&packedSampleDataSize, dataPtr + 12, sizeof(long long));
    memcpy(&unpackedSampleDataSize, dataPtr + 20, sizeof(long long));

    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&lineNo));
      swap8(reinterpret_cast<unsigned long long *>(&packedOffsetTableSize));
      swap8(reinterpret_cast<unsigned long long *>(&packedSampleDataSize));
      swap8(reinterpret_cast<unsigned long long *>(&unpackedSampleDataSize));
    }

    std::vector<int> pixelOffsetTable(dataWidth);

    // decode pixel offset table.
    {
      unsigned long dstLen = pixelOffsetTable.size() * sizeof(int);
      DecompressZip(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
                    dstLen, dataPtr + 28, packedOffsetTableSize);

      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (int i = 0; i < dataWidth; i++) {
        deepImage->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sampleData(unpackedSampleDataSize);

    // decode sample data.
    {
      unsigned long dstLen = unpackedSampleDataSize;
      DecompressZip(reinterpret_cast<unsigned char *>(&sampleData.at(0)),
                    dstLen, dataPtr + 28 + packedOffsetTableSize,
                    packedSampleDataSize);
      assert(dstLen == (unsigned long)unpackedSampleDataSize);
    }

    // decode sample
    int sampleSize = -1;
    std::vector<int> channelOffsetList(numChannels);
    {
      // Byte offset of each channel within one interleaved sample record;
      // sampleSize is the record's total byte size.
      int channelOffset = 0;
      for (int i = 0; i < numChannels; i++) {
        channelOffsetList[i] = channelOffset;
        if (channels[i].pixelType == TINYEXR_PIXELTYPE_UINT) {  // UINT
          channelOffset += 4;
        } else if (channels[i].pixelType == TINYEXR_PIXELTYPE_HALF) {  // half
          channelOffset += 2;
        } else if (channels[i].pixelType == TINYEXR_PIXELTYPE_FLOAT) {  // float
          channelOffset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channelOffset;
    }
    assert(sampleSize >= 2);

    // The last pixel offset is the cumulative sample count for the line, so
    // it must match the unpacked sample-data size.
    assert((size_t)(pixelOffsetTable[dataWidth - 1] * sampleSize) == sampleData.size());
    int samplesPerLine = sampleData.size() / sampleSize;

    //
    // Alloc memory
    //

    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      // Sample data is planar per channel within the line: all of channel
      // 0's samples, then channel 1's, etc. `dataOffset` walks those planes.
      unsigned long long dataOffset = 0;
      for (int c = 0; c < numChannels; c++) {
        deepImage->image[c][y] =
            (float *)malloc(sizeof(float) * samplesPerLine);

        if (channels[c].pixelType == 0) {  // UINT
          for (int x = 0; x < samplesPerLine; x++) {
            unsigned int ui = *reinterpret_cast<unsigned int *>(
                &sampleData.at(dataOffset + x * sizeof(int)));
            deepImage->image[c][y][x] = (float)ui;  // @fixme
          }
          dataOffset += sizeof(unsigned int) * samplesPerLine;
        } else if (channels[c].pixelType == 1) {  // half
          for (int x = 0; x < samplesPerLine; x++) {
            FP16 f16;
            f16.u = *reinterpret_cast<unsigned short *>(
                &sampleData.at(dataOffset + x * sizeof(short)));
            FP32 f32 = half_to_float(f16);
            deepImage->image[c][y][x] = f32.f;
          }
          dataOffset += sizeof(short) * samplesPerLine;
        } else {  // float
          for (int x = 0; x < samplesPerLine; x++) {
            float f = *reinterpret_cast<float *>(
                &sampleData.at(dataOffset + x * sizeof(float)));
            deepImage->image[c][y][x] = f;
          }
          dataOffset += sizeof(float) * samplesPerLine;
        }
      }
    }
  }  // y

  deepImage->width = dataWidth;
  deepImage->height = dataHeight;

  deepImage->channel_names =
      (const char **)malloc(sizeof(const char *) * numChannels);
  for (int c = 0; c < numChannels; c++) {
#ifdef _WIN32
    deepImage->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deepImage->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deepImage->num_channels = numChannels;

  return 0;  // OK
}
int SaveDeepEXR(const DeepImage *deepImage, const char *filename,
const char **err) {
if (deepImage == NULL || filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return -1;
}
FILE *fp = fopen(filename, "rb");
if (!fp) {
if (err) {
(*err) = "Cannot write file.";
}
return -1;
}
// Write header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
size_t n = fwrite(header, 1, 4, fp);
if (n != 4) {
if (err) {
(*err) = "Header write failed.";
}
fclose(fp);
return -3;
}
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
const char data[] = {2, 8, 0, 0};
size_t n = fwrite(data, 1, 4, fp);
if (n != 4) {
if (err) {
(*err) = "Flag write failed.";
}
fclose(fp);
return -3;
}
}
// Write attributes.
{
int data = 2; // ZIPS
WriteAttribute(fp, "compression", "compression",
reinterpret_cast<const unsigned char *>(&data), sizeof(int));
}
{
int data[4] = {0, 0, deepImage->width - 1, deepImage->height - 1};
WriteAttribute(fp, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data),
sizeof(int) * 4);
WriteAttribute(fp, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data),
sizeof(int) * 4);
}
int numScanlineBlocks = 1;
// Write offset tables.
int numBlocks = deepImage->height / numScanlineBlocks;
if (numBlocks * numScanlineBlocks < deepImage->height) {
numBlocks++;
}
#if 0 // @todo
std::vector<long long> offsets(numBlocks);
//std::vector<int> pixelOffsetTable(dataWidth);
// compress pixel offset table.
{
unsigned long dstLen = pixelOffsetTable.size() * sizeof(int);
Compresses(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
dstLen, dataPtr + 28, packedOffsetTableSize);
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
// int ret =
// miniz::mz_uncompress(reinterpret_cast<unsigned char
// *>(&pixelOffsetTable.at(0)), &dstLen, dataPtr + 28,
// packedOffsetTableSize);
// printf("ret = %d, dstLen = %d\n", ret, (int)dstLen);
//
for (int i = 0; i < dataWidth; i++) {
// printf("offt[%d] = %d\n", i, pixelOffsetTable[i]);
deepImage->offset_table[y][i] = pixelOffsetTable[i];
}
}
for (int y = 0; y < numBlocks; y++) {
//long long offset = *(reinterpret_cast<const long long *>(marker));
// printf("offset[%d] = %lld\n", y, offset);
//marker += sizeof(long long); // = 8
offsets[y] = offset;
}
// Write offset table.
fwrite(&offsets.at(0), sizeof(long long), numBlocks, fp);
for (int y = 0; y < numBlocks; y++) {
const unsigned char *dataPtr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int lineNo = *reinterpret_cast<const int *>(dataPtr);
long long packedOffsetTableSize =
*reinterpret_cast<const long long *>(dataPtr + 4);
long long packedSampleDataSize =
*reinterpret_cast<const long long *>(dataPtr + 12);
long long unpackedSampleDataSize =
*reinterpret_cast<const long long *>(dataPtr + 20);
// printf("line: %d, %lld/%lld/%lld\n", lineNo, packedOffsetTableSize,
// packedSampleDataSize, unpackedSampleDataSize);
int endLineNo = (std::min)(lineNo + numScanlineBlocks, dataHeight);
int numLines = endLineNo - lineNo;
// printf("numLines: %d\n", numLines);
std::vector<int> pixelOffsetTable(dataWidth);
// decode pixel offset table.
{
unsigned long dstLen = pixelOffsetTable.size() * sizeof(int);
DecompressZip(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
dstLen, dataPtr + 28, packedOffsetTableSize);
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
// int ret =
// miniz::mz_uncompress(reinterpret_cast<unsigned char
// *>(&pixelOffsetTable.at(0)), &dstLen, dataPtr + 28,
// packedOffsetTableSize);
// printf("ret = %d, dstLen = %d\n", ret, (int)dstLen);
//
for (int i = 0; i < dataWidth; i++) {
// printf("offt[%d] = %d\n", i, pixelOffsetTable[i]);
deepImage->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sampleData(unpackedSampleDataSize);
// decode sample data.
{
unsigned long dstLen = unpackedSampleDataSize;
// printf("dstLen = %d\n", dstLen);
// printf("srcLen = %d\n", packedSampleDataSize);
DecompressZip(reinterpret_cast<unsigned char *>(&sampleData.at(0)),
dstLen, dataPtr + 28 + packedOffsetTableSize,
packedSampleDataSize);
assert(dstLen == unpackedSampleDataSize);
}
// decode sample
int sampleSize = -1;
std::vector<int> channelOffsetList(numChannels);
{
int channelOffset = 0;
for (int i = 0; i < numChannels; i++) {
// printf("offt[%d] = %d\n", i, channelOffset);
channelOffsetList[i] = channelOffset;
if (channels[i].pixelType == 0) { // UINT
channelOffset += 4;
} else if (channels[i].pixelType == 1) { // half
channelOffset += 2;
} else if (channels[i].pixelType == 2) { // float
channelOffset += 4;
} else {
assert(0);
}
}
sampleSize = channelOffset;
}
assert(sampleSize >= 2);
assert(pixelOffsetTable[dataWidth - 1] * sampleSize == sampleData.size());
int samplesPerLine = sampleData.size() / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
unsigned long long dataOffset = 0;
for (int c = 0; c < numChannels; c++) {
deepImage->image[c][y] =
(float *)malloc(sizeof(float) * samplesPerLine);
// unsigned int channelOffset = channelOffsetList[c];
// unsigned int i = channelOffset;
// printf("channel = %d. name = %s. ty = %d\n", c,
// channels[c].name.c_str(), channels[c].pixelType);
// printf("dataOffset = %d\n", (int)dataOffset);
if (channels[c].pixelType == 0) { // UINT
for (int x = 0; x < samplesPerLine; x++) {
unsigned int ui = *reinterpret_cast<unsigned int *>(
&sampleData.at(dataOffset + x * sizeof(int)));
deepImage->image[c][y][x] = (float)ui; // @fixme
}
dataOffset += sizeof(unsigned int) * samplesPerLine;
} else if (channels[c].pixelType == 1) { // half
for (int x = 0; x < samplesPerLine; x++) {
FP16 f16;
f16.u = *reinterpret_cast<unsigned short *>(
&sampleData.at(dataOffset + x * sizeof(short)));
FP32 f32 = half_to_float(f16);
deepImage->image[c][y][x] = f32.f;
// printf("c[%d] f(half) = %f (0x%08x)\n", c, f32.f, f16.u);
}
dataOffset += sizeof(short) * samplesPerLine;
} else { // float
for (int x = 0; x < samplesPerLine; x++) {
float f = *reinterpret_cast<float *>(
&sampleData.at(dataOffset + x * sizeof(float)));
// printf(" f = %f(0x%08x)\n", f, *((unsigned int *)&f));
deepImage->image[c][y][x] = f;
}
dataOffset += sizeof(float) * samplesPerLine;
}
}
// printf("total: %d\n", dataOffset);
}
} // y
#endif
fclose(fp);
return 0; // OK
}
// Reset an EXRImage to a safe, empty state so it can be handed to the
// parser/loader functions. A NULL pointer is tolerated and ignored.
void InitEXRImage(EXRImage *exrImage) {
  if (!exrImage) {
    return;
  }

  exrImage->num_custom_attributes = 0;
  exrImage->num_channels = 0;

  // Nothing is allocated yet, so every owned pointer starts out NULL.
  exrImage->images = NULL;
  exrImage->channel_names = NULL;
  exrImage->requested_pixel_types = NULL;
  exrImage->pixel_types = NULL;
}
// Release all heap memory owned by an EXRImage.
//
// Improvement over the original: every freed pointer is reset to NULL and the
// element counts are zeroed afterwards, so calling FreeEXRImage() twice (or
// mixing it with InitEXRImage()) can no longer double-free or leave dangling
// pointers behind.
//
// Returns 0 on success, -1 if exrImage is NULL.
int FreeEXRImage(EXRImage *exrImage) {
  if (exrImage == NULL) {
    return -1; // Err
  }

  // Per-channel allocations (names were strdup'd, images malloc'd).
  for (int i = 0; i < exrImage->num_channels; i++) {
    if (exrImage->channel_names && exrImage->channel_names[i]) {
      free((char *)exrImage->channel_names[i]); // remove const
    }
    if (exrImage->images && exrImage->images[i]) {
      free(exrImage->images[i]);
    }
  }

  // free(NULL) is a no-op, so no guards are needed here.
  free(exrImage->channel_names);
  exrImage->channel_names = NULL;
  free(exrImage->images);
  exrImage->images = NULL;
  free(exrImage->pixel_types);
  exrImage->pixel_types = NULL;
  free(exrImage->requested_pixel_types);
  exrImage->requested_pixel_types = NULL;
  exrImage->num_channels = 0;

  // Custom attribute payloads (name/type strdup'd, value malloc'd).
  for (int i = 0; i < exrImage->num_custom_attributes; i++) {
    free(exrImage->custom_attributes[i].name);
    exrImage->custom_attributes[i].name = NULL;
    free(exrImage->custom_attributes[i].type);
    exrImage->custom_attributes[i].type = NULL;
    free(exrImage->custom_attributes[i].value);
    exrImage->custom_attributes[i].value = NULL;
  }
  exrImage->num_custom_attributes = 0;

  return 0;
}
// Parse a multi-channel EXR header from a file on disk.
//
// Reads the whole file into memory and delegates to
// ParseMultiChannelEXRHeaderFromMemory(). Returns 0 on success, a negative
// value on error (with *err set to a static message when err != NULL).
//
// Fixes over the original: ftell() failure (-1) is no longer converted to a
// huge size_t, an empty file no longer triggers UB via &buf[0] on an empty
// vector, and a short read is reported as an error instead of only asserted.
int ParseMultiChannelEXRHeaderFromFile(EXRImage *exrImage, const char *filename,
                                       const char **err) {
  if (exrImage == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }

  // Compute file size.
  fseek(fp, 0, SEEK_END);
  long sz = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (sz <= 0) {
    // ftell failure or empty file: cannot contain a valid EXR header.
    fclose(fp);
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }

  size_t filesize = (size_t)sz;
  std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  size_t ret = fread(&buf[0], 1, filesize, fp);
  fclose(fp);
  if (ret != filesize) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }

  return ParseMultiChannelEXRHeaderFromMemory(exrImage, &buf.at(0), err);
}
// Parse a multi-channel EXR header from an in-memory buffer.
//
// Fills exrImage with channel names/types, data/display windows and other
// header attributes. Returns 0 on success, a negative value on error (with
// *err pointing at a static message when err != NULL).
//
// Fixes over the original:
//  * numCustomAttributes was never incremented, so collected custom
//    attributes were silently dropped (and their strdup/malloc'd memory
//    leaked) — the final copy loop was dead code.
//  * lineOrder is a single byte; the old 4-byte swap4() on it read and wrote
//    past the variable (undefined behavior) and has been removed.
//  * An empty custom-attribute value no longer throws via data.at(0).
//  * Malformed/missing mandatory attributes now produce an error return
//    instead of aborting through assert().
int ParseMultiChannelEXRHeaderFromMemory(EXRImage *exrImage,
                                         const unsigned char *memory,
                                         const char **err) {
  if (exrImage == NULL || memory == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  const char *buf = reinterpret_cast<const char *>(memory);
  const char *marker = &buf[0];

  // Header check: OpenEXR magic number 0x76 0x2f 0x31 0x01.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      if (err) {
        (*err) = "Header mismatch.";
      }
      return -3;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) {
      if (err) {
        (*err) = "Unsupported version or scanline.";
      }
      return -4;
    }
    marker += 4;
  }

  // Mandatory attributes start as sentinels so we can detect their absence.
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int numChannels = -1;
  int displayWindow[4] = {-1, -1, -1, -1};    // @fixme.
  float screenWindowCenter[2] = {0.0f, 0.0f}; // @fixme
  float screenWindowWidth = 1.0f;             // @fixme
  float pixelAspectRatio = 1.0f;
  unsigned char lineOrder = 0; // 0 -> increasing y; 1 -> decreasing
  std::vector<ChannelInfo> channels;
  int numCustomAttributes = 0;
  std::vector<EXRAttribute> customAttribs;

  // Read attributes until the terminating '\0'.
  for (;;) {
    std::string attrName;
    std::string attrType;
    std::vector<unsigned char> data;
    const char *marker_next = ReadAttribute(attrName, attrType, data, marker);
    if (marker_next == NULL) {
      marker++; // skip '\0'
      break;
    }

    if (attrName.compare("compression") == 0) {
      // must be 0:No compression, 1: RLE, 2: ZIPs, 3: ZIP or 4: PIZ
      if (data[0] > 4) {
        if (err) {
          (*err) = "Unsupported compression type.";
        }
        return -5;
      }
    } else if (attrName.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      ReadChannelInfo(channels, data);
      numChannels = channels.size();
      if (numChannels < 1) {
        if (err) {
          (*err) = "Invalid channels format.";
        }
        return -6;
      }
    } else if (attrName.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&dx));
        swap4(reinterpret_cast<unsigned int *>(&dy));
        swap4(reinterpret_cast<unsigned int *>(&dw));
        swap4(reinterpret_cast<unsigned int *>(&dh));
      }
    } else if (attrName.compare("displayWindow") == 0) {
      memcpy(&displayWindow[0], &data.at(0), sizeof(int));
      memcpy(&displayWindow[1], &data.at(4), sizeof(int));
      memcpy(&displayWindow[2], &data.at(8), sizeof(int));
      memcpy(&displayWindow[3], &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[0]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[1]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[2]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[3]));
      }
    } else if (attrName.compare("lineOrder") == 0) {
      memcpy(&lineOrder, &data.at(0), sizeof(lineOrder));
      // BUGFIX: lineOrder is one byte — the previous 4-byte endian swap
      // accessed memory past the variable and is unnecessary for a byte.
    } else if (attrName.compare("pixelAspectRatio") == 0) {
      memcpy(&pixelAspectRatio, &data.at(0), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&pixelAspectRatio));
      }
    } else if (attrName.compare("screenWindowCenter") == 0) {
      memcpy(&screenWindowCenter[0], &data.at(0), sizeof(float));
      memcpy(&screenWindowCenter[1], &data.at(4), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[0]));
        swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[1]));
      }
    } else if (attrName.compare("screenWindowWidth") == 0) {
      memcpy(&screenWindowWidth, &data.at(0), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&screenWindowWidth));
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES)
      if (numCustomAttributes < TINYEXR_MAX_ATTRIBUTES) {
        EXRAttribute attrib;
        attrib.name = strdup(attrName.c_str());
        attrib.type = strdup(attrType.c_str());
        attrib.size = data.size();
        attrib.value = (unsigned char *)malloc(data.size());
        // BUGFIX: guard empty payloads — data.at(0) throws on an empty vector.
        if (!data.empty()) {
          memcpy((char *)attrib.value, &data.at(0), data.size());
        }
        customAttribs.push_back(attrib);
        // BUGFIX: the counter was never incremented, so every collected
        // custom attribute was dropped (and leaked) by the copy loop below.
        numCustomAttributes++;
      }
    }

    marker = marker_next;
  }

  // BUGFIX: report malformed headers instead of aborting via assert().
  if (dx < 0 || dy < 0 || dw < 0 || dh < 0 || numChannels < 1) {
    if (err) {
      (*err) = "Invalid or missing attributes in header.";
    }
    return -2;
  }

  int dataWidth = dw - dx + 1;
  int dataHeight = dh - dy + 1;

  // Copy parsed values into the output structure.
  {
    exrImage->channel_names =
        (const char **)malloc(sizeof(const char *) * numChannels);
    for (int c = 0; c < numChannels; c++) {
#ifdef _WIN32
      exrImage->channel_names[c] = _strdup(channels[c].name.c_str());
#else
      exrImage->channel_names[c] = strdup(channels[c].name.c_str());
#endif
    }
    exrImage->num_channels = numChannels;

    exrImage->width = dataWidth;
    exrImage->height = dataHeight;
    exrImage->pixel_aspect_ratio = pixelAspectRatio;
    exrImage->screen_window_center[0] = screenWindowCenter[0];
    exrImage->screen_window_center[1] = screenWindowCenter[1];
    exrImage->screen_window_width = screenWindowWidth;
    exrImage->display_window[0] = displayWindow[0];
    exrImage->display_window[1] = displayWindow[1];
    exrImage->display_window[2] = displayWindow[2];
    exrImage->display_window[3] = displayWindow[3];
    exrImage->data_window[0] = dx;
    exrImage->data_window[1] = dy;
    exrImage->data_window[2] = dw;
    exrImage->data_window[3] = dh;
    exrImage->line_order = lineOrder;

    exrImage->pixel_types = (int *)malloc(sizeof(int) * numChannels);
    for (int c = 0; c < numChannels; c++) {
      exrImage->pixel_types[c] = channels[c].pixelType;
    }

    // Initially fill with values of `pixel-types`
    exrImage->requested_pixel_types = (int *)malloc(sizeof(int) * numChannels);
    for (int c = 0; c < numChannels; c++) {
      exrImage->requested_pixel_types[c] = channels[c].pixelType;
    }
  }

  if (numCustomAttributes > 0) {
    // The cap above guarantees we never exceed the fixed-size array.
    assert(customAttribs.size() <= TINYEXR_MAX_ATTRIBUTES);
    exrImage->num_custom_attributes = numCustomAttributes;
    for (int i = 0; i < (int)customAttribs.size(); i++) {
      exrImage->custom_attributes[i] = customAttribs[i];
    }
  }

  return 0; // OK
}
#endif
#endif // __TINYEXR_H__
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * The microsecond fields are first normalized by carrying whole seconds
 * between the fields of *y (note: *y is modified in the process), after
 * which the plain field-wise difference is stored in *result.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y when x holds fewer microseconds than y. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds of the difference into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 32;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
ScatterHelper.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <pointercast.h>
#include <op_boilerplate.h>
#include <NDArray.h>
#include <numeric>
namespace nd4j {
namespace ops {
// ScatterHelper: static routines implementing scatter-style updates — the
// elements (or sub-arrays) of `updates`, addressed through `indices`, are
// combined into `output` with a pairwise operation `op`.
class ScatterHelper {

    public:

//        static FORCEINLINE Nd4jStatus scatterApply(pairwise::Ops op, NDArray* output, NDArray* indices, NDArray* updates) {
//            auto input = output;
//            int indicesLength = (int) indices->lengthOf();
//            if ((indices->isVector() && input->isVector() && updates->isVector()) ||
//                (input->isScalar() && input->isScalar() && updates->isScalar()) ||
//                (input->isVector() && indices->isScalar() && updates->isScalar()) ) {
//                for (int e = 0; e < indicesLength; e++) {
//                    int idx = indices->e<int>(e);
//                    T t0 = input->e<T>(idx);
//                    T t1 = updates->e<T>(e);
//                    output->p(idx, op(t0, t1, nullptr));
//                }
//                return Status::OK();
//            } else if (indices->isVector() || indices->isScalar()) {
//                std::vector<int> idc;
//                std::vector<int> idcU;
//                for (int e = 0; e < indicesLength; e++) {
//                    idc.push_back(indices->e<int>(e));
//                    idcU.push_back(e);
//                }
//                std::vector<int> tadDimension = ShapeUtils::convertAxisToTadTarget(input->rankOf(), {0});
//                auto tadsOperand = output->multipleTensorsAlongDimension(idc, tadDimension);
//                auto tadsUpdate = updates->multipleTensorsAlongDimension(idcU, tadDimension);
//                auto z0 = tadsOperand->at(0);
//                auto z1 = tadsUpdate->at(0);
//                REQUIRE_TRUE(z0->isSameShape(z1), 0, "scatter_add: updates shapes should match");
//                for (int e = 0; e < tadsOperand->size(); e++) {
//                    auto t0 = tadsOperand->at(e);
//                    auto t1 = tadsUpdate->at(e);
//                    t0->template applyPairwiseTransform(op, *t1, nullptr);
//                }
//                delete tadsOperand;
//                delete tadsUpdate;
//                return Status::OK();
//            } else if (indices->isMatrix() || indices->rankOf() >= 2) {
//                auto _input = input->reshape(input->ordering(), {input->sizeAt(0), -1});
//                auto _updates = updates->reshape(updates->ordering(), {indicesLength, (int) updates->lengthOf() / indicesLength});
//                auto tadsOperand = _input->allTensorsAlongDimension({1});
//                auto tadsUpdates = _updates->allTensorsAlongDimension({1});
//                for (int e = 0; e < indicesLength; e++) {
//                    int idx = indices->e<int>(e);
//                    auto t0 = tadsOperand->at(idx);
//                    auto t1 = tadsUpdates->at(e);
//                    t0->template applyPairwiseTransform(op, *t1, nullptr);
//                }
//                delete _input;
//                delete _updates;
//                delete tadsOperand;
//                delete tadsUpdates;
//                return Status::OK();
//            }
//            return Status::THROW("ScatterHelper failed");
//        }

////////////////////////////////////////////////////////////////////////
        // Applies `op` between output[indices[i]] and updates[i] for every
        // element i of `indices`.
        static FORCEINLINE void scatter(pairwise::Ops op, const NDArray& indices, const NDArray& updates, NDArray& output) {

            const int outRank = output.rankOf();
            const int indRank = indices.rankOf();
            const int updRank = updates.rankOf();
            const Nd4jLong indLen = indices.lengthOf();

            if(outRank == 1) {
                // Rank-1 fast path: each index selects a single output element.

// #pragma omp parallel for if(indLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided)
#pragma omp parallel for schedule(guided)
                for(Nd4jLong i = 0; i < indLen; ++i) {

                    Nd4jLong idx = indices.e<Nd4jLong>(i);
                    NDArray out = output({idx, idx+1});

                    // NOTE(review): the critical section serializes the update,
                    // presumably because `indices` may contain duplicates that
                    // target the same output element — confirm.
#pragma omp critical
                    out.applyPairwiseTransform(op, updates.e(i), nullptr);
                }
            }
            else {          // outRank > 1

                // When updates has the same rank as output and indices is a
                // vector, only the leading dimension is indexed.
                int sizeOfDims = indRank;
                if(outRank == updRank && indices.isVector())
                    sizeOfDims = 1;

                std::vector<int> dimsToExcludeUpd(sizeOfDims);
                std::iota(dimsToExcludeUpd.begin(), dimsToExcludeUpd.end(), 0);

// #pragma omp parallel for if(indLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided)     // causes known openMP asan bug !
// #pragma omp parallel for schedule(guided)
                for(Nd4jLong i = 0; i < indLen; ++i) {

                    // Sub-array of output along dimension 0 vs. the matching
                    // sub-array of updates.
                    NDArray outSubArr = output(indices.e<Nd4jLong>(i), std::vector<int>({0}));
                    NDArray updSubArr = updates(i, dimsToExcludeUpd);

#pragma omp critical
                    outSubArr.applyPairwiseTransform(op, updSubArr, nullptr);
                }
            }
        }

////////////////////////////////////////////////////////////////////////
        // N-dimensional scatter: each row of `indices` (its last dimension)
        // holds the coordinates of the output sub-array to update.
        static FORCEINLINE void scatterND(pairwise::Ops op, const NDArray& indices, const NDArray& updates, NDArray& output) {

            const Nd4jLong indLen = indices.lengthOf();
            const int outRank = output.rankOf();
            const int indRank = indices.rankOf();
            const Nd4jLong indLastDim = indices.sizeAt(-1);

            if(outRank == 1) {
                // Rank-1 fast path, identical to scatter() above.

// #pragma omp parallel for if(indLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided)
#pragma omp parallel for schedule(guided)
                for(Nd4jLong i = 0; i < indLen; ++i) {

                    Nd4jLong idx = indices.e<Nd4jLong>(i);
                    NDArray out = output({idx, idx+1});

#pragma omp critical
                    out.applyPairwiseTransform(op, updates.e(i), nullptr);
                }
            }
            else {

                std::vector<int> dimsToExcludeInd = ShapeUtils::evalDimsToExclude(indRank, {indRank-1});
                std::vector<int> dimsToExcludeUpd(indRank - 1);
                std::iota(dimsToExcludeUpd.begin(), dimsToExcludeUpd.end(), 0);
                // {lo, hi} index pairs for every output dimension; dimensions
                // not covered by an index row stay {0, 0} (full range).
                std::vector<Nd4jLong> idxRangeOut(2*outRank, 0);

// #pragma omp parallel for if(indLen/indLastDim > Environment::getInstance()->elementwiseThreshold()) schedule(guided) firstprivate(idxRangeOut)
#pragma omp parallel for schedule(guided) firstprivate(idxRangeOut)
                for(Nd4jLong i = 0; i < indLen/indLastDim; ++i) {

                    NDArray indSubArr = indices(i, dimsToExcludeInd);

                    // Translate one index row into {idx, idx+1} ranges.
                    for(Nd4jLong j = 0; j < indLastDim; ++j) {
                        idxRangeOut[2*j] = indSubArr.e<Nd4jLong>(j);
                        idxRangeOut[2*j + 1] = idxRangeOut[2*j] + 1;
                    }

                    NDArray outSubArr = output(idxRangeOut);
                    NDArray updSubArr = updates(i, dimsToExcludeUpd);

#pragma omp critical
                    outSubArr.applyPairwiseTransform(op, updSubArr, nullptr);
                }
            }
        }

////////////////////////////////////////////////////////////////////////
        // Gather (or gradient) step used by loss ops.
        static FORCEINLINE void scatterForLoss(const NDArray& indices, const NDArray& updates, NDArray& output, const bool calcGrad) {

            // requirements for arrays
            // shapes of updates and output must be the same
            // shape of indices should be the same as updates shape with last dimension excluded
            // for example if updates is {a,b,c} then indices should be {a,b}

            const Nd4jLong indicesLen = indices.lengthOf();

            std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(updates.rankOf(), {-1});

            if(!calcGrad) {
                // Forward: pick updates[i][indices[i]] into output[i].
#pragma omp parallel for schedule(guided)
                for(Nd4jLong i = 0; i < indicesLen; ++i) {

                    auto subArr = updates(i, dimsToExclude);
                    output.p(i, subArr.e(indices.e<Nd4jLong>(i)));
                }
            }
            else {
                // Gradient: subtract 1 at the indexed position. NOTE(review):
                // the write goes into the sub-array object; this only reaches
                // `updates` if operator() returns a view — confirm NDArray
                // sub-array semantics. `output` is untouched on this branch.
#pragma omp parallel for schedule(guided)
                for(Nd4jLong i = 0; i < indicesLen; ++i) {

                    auto subArr = updates(i, dimsToExclude);
                    auto ind = indices.e<Nd4jLong>(i);
                    subArr.p(ind, subArr.e(ind) - 1.);
                }
            }
        }
};
}
}
|
normMat.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "compearth.h"
#ifdef COMPEARTH_USE_MKL
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreserved-id-macro"
#pragma clang diagnostic ignored "-Wstrict-prototypes"
#endif
#include <mkl_cblas.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#else
#include <cblas.h>
#endif
static double compearth_norm64f(const int n, const double *__restrict__ x,
const enum ceNormType_enum norm,
const double p, int *ierr);
/*!
* @brief Computes the matrix (Frobenius) norm for a matrix
*
* @param[in] n number of matrices
* @param[in] M 3 x 3 input matrix as a length 9 array [9*n]
* @param[in] Lnorm matrix norm
* TWO_NORM (2)
* ONE_NORM (1)
* P_NORM (in this case must set p)
* INFINITY_NORM
* NEGATIVE_INFINITY_NORM
* @param[in] p if using a p-norm this is the value for p (> 0)
*
* @param[out] mnorm matrix norms for each matrix
*
* @result 0 indicates success
*
*/
/*!
 * @brief Computes the requested matrix norm for each of n packed 3x3
 *        matrices (each stored as 9 contiguous doubles in M).
 *
 * @param[in] n      Number of matrices.
 * @param[in] M      Packed matrices; length [9*n].
 * @param[in] Lnorm  Norm type (CE_TWO_NORM, CE_ONE_NORM, CE_P_NORM, ...).
 * @param[in] p      Exponent used only when Lnorm is CE_P_NORM.
 *
 * @param[out] mnorm Norm of each matrix; length [n].
 *
 * @result 0 indicates success. On failure the offending entry of mnorm is
 *         zeroed and processing stops.
 */
int compearth_normMat(const int n,
                      const double *__restrict__ M,
                      const enum ceNormType_enum Lnorm,
                      const double p,
                      double *__restrict__ mnorm)
{
    int imt, ierr = 0;
    for (imt = 0; imt < n; imt++)
    {
        /* Each 3x3 matrix is treated as a flat 9-vector. */
        mnorm[imt] = compearth_norm64f(9, &M[9*imt], Lnorm, p, &ierr);
        if (ierr != 0)
        {
            fprintf(stderr, "%s: Error computing matrix norm!\n", __func__);
            mnorm[imt] = 0.0;
            break;
        }
    }
    return ierr;
}
/*!
* @brief Computes the norm of a vector. This is from ISTI's ISCL.
*
* @param[in] n Length of array x.
* @param[in] x Array of dimension [n] of which to compute norm.
* @param[in] norm Type of norm to compute.
* @param[in] p If performing a P norm then this must be defined to
* a real number greater than or equal to 1. Otherwise,
* it will not be accessed.
*
* @param[out] ierr 0 indicates success
*
* @result P-norm of array x.
*
* @author Ben Baker
*
* @date August 2017 - redefined variables so that this may exist in
* compearth without collisions with ISCL.
*
*/
/*!
 * @brief Computes the norm of a vector. This is from ISTI's ISCL.
 *
 * @param[in] n     Length of array x.
 * @param[in] x     Array of dimension [n] of which to compute the norm.
 * @param[in] norm  Type of norm to compute.
 * @param[in] p     Exponent for CE_P_NORM; must be positive. Not accessed
 *                  for other norm types.
 *
 * @param[out] ierr 0 indicates success.
 *
 * @result Requested norm of x (0.0 on error).
 */
static double compearth_norm64f(const int n, const double *__restrict__ x,
                                const enum ceNormType_enum norm,
                                const double p, int *ierr)
{
    double result = 0.0;
    int k;
    *ierr = 0;
    /* Reject empty or missing input up front. */
    if (n < 1 || x == NULL)
    {
        if (n < 1){fprintf(stderr, "%s: Error no data\n", __func__);}
        if (x == NULL){fprintf(stderr, "%s: Error x is NULL\n", __func__);}
        *ierr = 1;
        return result;
    }
    switch (norm)
    {
        case CE_TWO_NORM:
            result = cblas_dnrm2(n, x, 1);
            break;
        case CE_ONE_NORM:
            result = cblas_dasum(n, x, 1);
            break;
        case CE_P_NORM:
            if (p <= 0.0)
            {
                fprintf(stderr, "%s: Invalid p value %f\n", __func__, p);
                *ierr = 1;
                return result;
            }
#pragma omp simd reduction(+:result)
            for (k=0; k<n; k++)
            {
                result = result + pow(fabs(x[k]), p);
            }
            result = pow(result, 1./p);
            break;
        case CE_INFINITY_NORM:
            /* Largest magnitude element. */
            result = fabs(x[cblas_idamax(n, x, 1)]);
            break;
        case CE_NEGATIVE_INFINITY_NORM:
            /* Smallest magnitude element. */
            result = fabs(x[0]);
#pragma omp simd reduction(min:result)
            for (k=1; k<n; k++)
            {
                result = fmin(fabs(x[k]), result);
            }
            break;
        default:
            /* Unknown norm type - default to 2-norm. */
            fprintf(stderr, "%s: Defaulting to 2-norm\n", __func__);
            result = cblas_dnrm2(n, x, 1);
            break;
    }
    return result;
}
|
DRB054-inneronly2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example with loop-carried data dependence at the outer level loop.
The inner level loop can be parallelized.
*/
/* Benchmark kernel: loop-carried data dependence at the outer (i) loop —
 * iteration i reads row i-1 written by the previous iteration — so only
 * the inner (j) loop is parallelized. This program is race-free. */
int main()
{
  int i,j;
  int n=100, m=100;
  double b[n][m];

  /* Fill b with deterministic values. */
  for(i=0;i<n; i++)
    for(j=0;j<n; j++)
      b[i][j]=(double)(i*j);

  /* The outer loop must stay serial (b[i] depends on b[i-1]); the inner
   * loop carries no dependence and is safely parallelized. */
  for (i=1;i<n;i++)
#pragma omp parallel for
    for (j=1;j<m;j++)
      b[i][j]=b[i-1][j-1];

  return 0;
}
|
crypt-sha1_fmt_plug.c | /*
* This file is based on the "cryptsha512_fmt_plug.c" file.
*
* This software is Copyright (c) 2014 Dhiru Kholia, and it is hereby released
* to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Enhanced code (dropped usage of the Gladman hmac code), and addition of SSE2
* logic, Aug 2014, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_cryptsha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cryptsha1);
#else
#include <string.h>
#ifdef _OPENMP
#define OMP_SCALE 16 // untested
#include <omp.h>
#endif
#include "arch.h"
#include "sha.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#define PBKDF1_LOGIC 1
#include "pbkdf2_hmac_sha1.h"
#include "base64_convert.h"
#include "memdbg.h"
#define SHA1_MAGIC "$sha1$"
#define SHA1_SIZE 20
#define FORMAT_LABEL "sha1crypt"
#define FORMAT_NAME "NetBSD's sha1crypt"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define BINARY_SIZE 20
// max valid salt len in hash. Final salt 'used' is larger, by length of "$sha1$" and length of base10 string of rounds
#define SALT_LENGTH 64
#ifdef MMX_COEF
#define ALGORITHM_NAME "PBKDF1-SHA1 " SHA1_N_STR MMX_TYPE
#else
#define ALGORITHM_NAME "PBKDF1-SHA1 " ARCH_BITS_STR "/" ARCH_BITS_STR
#endif
#define PLAINTEXT_LENGTH 125
#define CHECKSUM_LENGTH 28
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT MMX_COEF
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* An example hash (of password) is $sha1$40000$jtNX3nZ2$hBNaIXkt4wBI2o5rsi8KejSjNqIq.
* An sha1-crypt hash string has the format $sha1$rounds$salt$checksum, where:
*
* $sha1$ is the prefix used to identify sha1-crypt hashes, following the Modular Crypt Format
* rounds is the decimal number of rounds to use (40000 in the example).
* salt is 0-64 characters drawn from [./0-9A-Za-z] (jtNX3nZ2 in the example).
* checksum is 28 characters drawn from the same set, encoding a 168-bit checksum.
*/
static struct fmt_tests tests[] = {
{"$sha1$64000$wnUR8T1U$vt1TFQ50tBMFgkflAFAOer2CwdYZ", "password"},
{"$sha1$40000$jtNX3nZ2$hBNaIXkt4wBI2o5rsi8KejSjNqIq", "password"},
{"$sha1$64000$wnUR8T1U$wmwnhQ4lpo/5isi5iewkrHN7DjrT", "123456"},
{"$sha1$64000$wnUR8T1U$azjCegpOIk0FjE61qzGWhdkpuMRL", "complexlongpassword@123456"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct saltstruct {
unsigned int length;
unsigned int rounds;
unsigned char salt[SALT_LENGTH+sizeof(SHA1_MAGIC)+7+1]; // allows up to 9999999 sized rounds with 64 byte salt.
} *cur_salt;
/* One-time format setup: scale the keys-per-crypt limits for OpenMP and
 * allocate the plaintext and digest buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	/* min scales by thread count only; max additionally by OMP_SCALE. */
	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
	                            self->params.max_keys_per_crypt,
	                            MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) *
	                            self->params.max_keys_per_crypt,
	                            MEM_ALIGN_WORD);
}
/* Partial-hash accessors: return the low 4..27 bits of the first 32-bit
 * word of a computed digest, used by the cracker for hash-table bucketing. */
static int get_hash_0(int index)
{
	return crypt_out[index][0] & 0xf;
}
static int get_hash_1(int index)
{
	return crypt_out[index][0] & 0xff;
}
static int get_hash_2(int index)
{
	return crypt_out[index][0] & 0xfff;
}
static int get_hash_3(int index)
{
	return crypt_out[index][0] & 0xffff;
}
static int get_hash_4(int index)
{
	return crypt_out[index][0] & 0xfffff;
}
static int get_hash_5(int index)
{
	return crypt_out[index][0] & 0xffffff;
}
static int get_hash_6(int index)
{
	return crypt_out[index][0] & 0x7ffffff;
}
/* Validates a candidate hash string of the form $sha1$rounds$salt$checksum.
 * Returns 1 when the string parses cleanly, 0 otherwise.
 * NOTE(review): uses strtok(), which is not reentrant — fine if valid() is
 * only called from a single thread; confirm against the format caller. */
static int valid(char * ciphertext, struct fmt_main * self) {
	char *p, *keeptr, tst[24];
	unsigned rounds;

	if (strncmp(ciphertext, SHA1_MAGIC, sizeof(SHA1_MAGIC) - 1))
		return 0;

	// validate rounds
	/* Work on a copy: strtok() modifies the string it scans. */
	keeptr = strdup(ciphertext);
	p = &keeptr[sizeof(SHA1_MAGIC)-1];
	if ((p = strtok(p, "$")) == NULL)	/* rounds */
		goto err;
	rounds = strtoul(p, NULL, 10);
	/* Round-trip through sprintf: one comparison rejects leading zeros,
	 * signs, whitespace and out-of-range values. */
	sprintf(tst, "%u", rounds);
	if (strcmp(tst, p))
		goto err;

	// validate salt
	if ((p = strtok(NULL, "$")) == NULL)	/* salt */
		goto err;
	/* Must be crypt-base64 characters only, up to SALT_LENGTH of them. */
	if (strlen(p) > SALT_LENGTH || strlen(p) != base64_valid_length(p, e_b64_crypt, 0))
		goto err;

	// validate checksum
	if ((p = strtok(NULL, "$")) == NULL)	/* checksum */
		goto err;
	if (strlen(p) > CHECKSUM_LENGTH || strlen(p) != base64_valid_length(p, e_b64_crypt, 0))
		goto err;

	/* Reject trailing fields after the checksum. */
	if (strtok(NULL, "$"))
		goto err;

	MEM_FREE(keeptr);
	return 1;
err:;
	MEM_FREE(keeptr);
	return 0;
}
/* Decode four crypt-base64 characters at `pos` into three output bytes
 * written at indices b1/b2/b3 of `out`; advances `pos` by 4. */
#define TO_BINARY(b1, b2, b3) \
	value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); \
	pos += 4; \
	out[b1] = value >> 16; \
	out[b2] = value >> 8; \
	out[b3] = value;

/* Decode the base64 checksum field (after the last '$') into the raw
 * binary hash. Returns a pointer to a static buffer. */
static void * get_binary(char * ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 16];
		ARCH_WORD dummy;
		ARCH_WORD_32 swap[1];
	} buf;
	unsigned char *out = buf.c;
	ARCH_WORD_32 value;
	char *pos = strrchr(ciphertext, '$') + 1;
	int i = 0;

	/* 4 input chars -> 3 output bytes; i = 0,3,...,18 covers 21 bytes */
	do {
		TO_BINARY(i, i + 1, i + 2);
		i = i + 3;
	} while (i <= 18);
#if (ARCH_LITTLE_ENDIAN==0)
	/* big-endian host: byte-swap each 32-bit word in place */
	for (i = 0; i < sizeof(buf.c)/4; ++i) {
		buf.swap[i] = JOHNSWAP(buf.swap[i]);
	}
#endif
	return (void *)out;
}
/* Store one plaintext candidate, truncated to the supported maximum and
 * always NUL-terminated. */
static void set_key(char *key, int index)
{
	size_t n = strlen(key);

	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, n);
	saved_key[index][n] = '\0';
}
/* Return the stored plaintext candidate for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Compute PBKDF1-SHA1 for all queued candidates under cur_salt.
 * With SIMD builds (SSE_GROUP_SZ_SHA1 defined) keys are hashed in groups
 * of SSE_GROUP_SZ_SHA1; otherwise one at a time. OpenMP splits the index
 * range across threads in steps of MAX_KEYS_PER_CRYPT. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SSE_GROUP_SZ_SHA1
		int lens[SSE_GROUP_SZ_SHA1], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
			unsigned char *poutc;
		} x;
		/* gather key pointers/lengths and output slots for one group */
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf1_sha1_sse((const unsigned char **)pin, lens,
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, &(x.poutc),
			BINARY_SIZE, 0);
#else
		/* scalar fallback: one key per iteration */
		pbkdf1_sha1((const unsigned char*)(saved_key[index]),
			strlen(saved_key[index]),
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, (unsigned char*)crypt_out[index],
			BINARY_SIZE, 0);
#endif
	}
	return count;
}
/* Select the active salt for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = salt;
}
/* Parse a ciphertext into the salt structure. The salt actually fed to
 * PBKDF1 is the decorated string <salt><magic><rounds>.
 * Returns a pointer to static storage (the format API copies it). */
static void *get_salt(char *ciphertext)
{
	static struct saltstruct out;
	char tmp[sizeof(out.salt)];
	char *p;
	int n;

	memset(&out, 0, sizeof(out));
	/* copy everything up to (not including) the checksum field */
	p = strrchr(ciphertext, '$') + 1;
	strnzcpy(tmp, ciphertext, p - ciphertext);
	out.rounds = strtoul(&ciphertext[sizeof(SHA1_MAGIC)-1], NULL, 10);
	// point p to the salt value, BUT we have to decorate the salt for this hash.
	p = strrchr(tmp, '$') + 1;
	// real salt used is: <salt><magic><iterations>
	n = snprintf((char*)out.salt, sizeof(out.salt), "%.*s%s%u", (int)strlen(p), p, SHA1_MAGIC, out.rounds);
	/* snprintf() returns the would-be (untruncated) length; clamp so
	 * out.length never exceeds the bytes actually stored in out.salt
	 * (the original could overstate it on truncation). */
	if (n < 0)
		n = 0;
	else if ((size_t)n >= sizeof(out.salt))
		n = (int)sizeof(out.salt) - 1;
	out.length = n;
	return &out;
}
/* Does any computed hash in [0, count) match `binary`? Compares the full
 * BINARY_SIZE bytes, so cmp_exact() below can trivially return 1. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}
/* Exact comparison of the full binary hash for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* cmp_all/cmp_one already compare all BINARY_SIZE bytes, so there is
 * nothing further to verify here. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
// Public domain hash function by DJ Bernstein
// We are hashing the entire struct
static int salt_hash(void *salt)
{
	unsigned char *s = salt;
	unsigned int hash = 5381;
	unsigned int i;

	/* djb2 variant (XOR mixing) over the raw saltstruct bytes */
	for (i = 0; i < SALT_SIZE; i++)
		hash = ((hash << 5) + hash) ^ s[i];
	return hash & (SALT_HASH_SIZE - 1);
}
#if FMT_MAIN_VERSION > 11
/* Expose the per-salt PBKDF1 iteration count as a tunable-cost metric. */
static unsigned int iteration_count(void *salt)
{
	struct saltstruct *p = (struct saltstruct *)salt;
	return p->rounds;
}
#endif
/* Format descriptor wiring the callbacks above into the John-the-Ripper
 * core: first the static parameters, then the method table. */
struct fmt_main fmt_cryptsha1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{
			"iteration count",
		},
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
			iteration_count,
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
TomoP2DModel_core.c | /*
* Copyright 2017 Daniil Kazantsev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "TomoP2DModel_core.h"
#define M_PI 3.14159265358979323846
#define MAXCHAR 1000
/* Functions to build spatial (2D) and temporal (2D +time) phantoms from the library of models: Phantom2DLibrary.dat
*
* Input Parameters:
* 1. ModelNo - the model number from Phantom3DLibrary file
* 2. VolumeSize in voxels (N x N)
* 3. Object - Analytical Model
* 4. C0 - intensity
* 5. x0 - x0 position
* 6. y0 - y0 position
* 7. a - size object
* 8. b - size object
* 9. phi_rot - rotation angle
*
* Output:
* 1. The analytical phantom size of [N x N] or a temporal phantom size of [N xN x Time-frames]
*/
/* function to build a single object */
float TomoP2DObject_core(float *A, int N, char *Object,
float C0, /* intensity */
float x0, /* x0 position */
float y0, /* y0 position */
float a , /* a - size object */
float b , /* b - size object */
float phi_rot, /* phi - rotation angle */
int tt /* time frame loop */)
{
// printf ("Base C0 %.2e x0 %.2e y0 %.2e a %.2e b %.2e phi %.2e\n" , C0, x0, y0, a, b, phi_rot);
int i, j;
float *Tomorange_X_Ar=NULL, Tomorange_Xmin, Tomorange_Xmax, H_x, C1, a2, b2, phi_rot_radian, sin_phi, cos_phi;
float *Xdel = NULL, *Ydel = NULL, T;
Tomorange_X_Ar = malloc(N*sizeof(float));
Tomorange_Xmin = -1.0f;
Tomorange_Xmax = 1.0f;
H_x = (Tomorange_Xmax - Tomorange_Xmin)/(N);
for(i=0; i<N; i++) {Tomorange_X_Ar[i] = Tomorange_Xmin + (float)i*H_x;}
C1 = -4.0f*logf(2.0f);
/************************************************/
phi_rot_radian = phi_rot*((float)M_PI/180.0f);
sin_phi=sinf(phi_rot_radian); cos_phi=cosf(phi_rot_radian);
Xdel = malloc(N*sizeof(float));
Ydel = malloc(N*sizeof(float));
for(i=0; i<N; i++) {
Xdel[i] = Tomorange_X_Ar[i] - x0;
Ydel[i] = Tomorange_X_Ar[i] - y0;
}
a2 = 1.0f/(a*a);
b2 = 1.0f/(b*b);
/* all parameters of an object have been extracted, now run the building modules */
if (strcmp("gaussian",Object) == 0) {
/* The object is a gaussian */
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = C1*(a2*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + b2*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2));
A[tt*N*N + j*N+i] += C0*expf(T);
}}
}
else if (strcmp("parabola",Object) == 0) {
/* the object is a parabola Lambda = 1/2 */
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = a2*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + b2*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2);
if (T <= 1) T = C0*sqrtf(1.0f - T);
else T = 0.0f;
A[tt*N*N + j*N+i] += T;
}}
}
else if (strcmp("ellipse",Object) == 0) {
/* the object is an elliptical disk */
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = a2*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + b2*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2);
if (T <= 1) T = C0;
else T = 0.0f;
A[tt*N*N + j*N+i] += T;
}}
}
else if (strcmp("parabola1",Object) == 0) {
/* the object is a parabola Lambda = 1*/
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = (4.0f*a2)*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + (4.0f*b2)*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2);
if (T <= 1) T = C0*sqrtf(1.0f - T);
else T = 0.0f;
A[tt*N*N + j*N+i] += T;
}}
}
else if (strcmp("cone",Object) == 0) {
/*the object is a cone*/
#pragma omp parallel for shared(A) private(i,j,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
T = a2*powf((Xdel[i]*cos_phi + Ydel[j]*sin_phi),2) + b2*powf((-Xdel[i]*sin_phi + Ydel[j]*cos_phi),2);
if (T <= 1) T = C0*(1.0f - sqrtf(T));
else T = 0.0f;
A[tt*N*N + j*N+i] += T;
}}
}
else if (strcmp("rectangle",Object) == 0) {
/* the object is a rectangle */
float x0r, y0r, HX, HY;
a2 = 0.5f*a;
b2 = 0.5f*b;
x0r=x0*cosf(0.0f) + y0*sinf(0.0f);
y0r=-x0*sinf(0.0f) + y0*cosf(0.0f);
if (phi_rot_radian < 0.0f) {
phi_rot_radian = (float)M_PI + phi_rot_radian;
sin_phi=sinf(phi_rot_radian);
cos_phi=cosf(phi_rot_radian);
}
#pragma omp parallel for shared(A) private(i,j,HX,HY,T)
for(i=0; i<N; i++) {
for(j=0; j<N; j++) {
HX = fabsf((Xdel[i] - x0r)*sin_phi + (Ydel[j] - y0r)*cos_phi);
T = 0.0f;
if (HX <= a2) {
HY = fabsf((Ydel[j] - y0r)*sin_phi - (Xdel[i] - x0r)*cos_phi);
if (HY <= b2) {T = C0;}
}
A[tt*N*N + j*N+i] += T;
}}
}
else {
return 0;
}
free(Xdel); free(Ydel);
/************************************************/
free(Tomorange_X_Ar);
return *A;
}
/* Build a 2D (or 2D+time) phantom: look up model `ModelSelected` in the
 * parameters file and accumulate all of its components into A.
 *
 * A  - output buffer, N*N floats per time frame (caller pre-zeroes it)
 * ModelSelected - model number to search for
 * N  - grid dimension
 * ModelParametersFilename - path to Phantom2DLibrary.dat
 *
 * Returns A[0]; when the file cannot be opened, A is left untouched.
 * Fix vs. the original: on fopen() failure the function returned through
 * an unconditional fclose(fp) with fp == NULL, which is undefined
 * behavior — we now return early instead. */
float TomoP2DModel_core(float *A, int ModelSelected, int N, char *ModelParametersFilename)
{
    FILE *fp = fopen(ModelParametersFilename, "r"); /* read parameters file */
    int Model = 0, Components = 0, steps = 0, counter = 0, ii;
    float C0 = 0.0f, x0 = 0.0f, y0 = 0.0f, a = 0.0f, b = 0.0f, psi_gr1 = 0.0f;

    if (fp == NULL) {
        printf("%s \n","Cannot open the model library file (Phantom2DLibrary.dat)");
        return *A; /* do NOT fall through to fclose(NULL) */
    }
    {
        char str[MAXCHAR];
        char tmpstr1[16];
        char tmpstr2[22];
        char tmpstr3[16];
        char tmpstr4[16];
        char tmpstr5[16];
        char tmpstr6[16];
        char tmpstr7[16];
        char tmpstr8[16];
        while (fgets(str, MAXCHAR, fp) != NULL)
        {
            /* work with non-# commented lines */
            if (str[0] != '#') {
                sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
                if (strcmp(tmpstr1,"Model")==0)
                {
                    Model = atoi(tmpstr2);
                    /* process only the first occurrence of the wanted model */
                    if ((ModelSelected == Model) && (counter == 0)) {
                        /* check if we have a right model */
                        if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
                        else {
                            /* unexpected end of file at the Components line */
                            break; }
                        if (strcmp(tmpstr1,"Components") == 0) Components = atoi(tmpstr2);
                        if (Components <= 0) {
                            /* Components must be positive */
                            break; }
                        if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
                        else {
                            /* unexpected end of file at the TimeSteps line */
                            break; }
                        if (strcmp(tmpstr1,"TimeSteps") == 0) steps = atoi(tmpstr2);
                        if (steps <= 0) {
                            /* TimeSteps must be positive */
                            break; }
                        if (steps == 1) {
                            /**************************************************/
                            /* Stationary 2D model: loop over all components  */
                            for(ii=0; ii<Components; ii++) {
                                if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr2, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8);
                                else {
                                    /* unexpected end of file in the objects loop */
                                    break; }
                                if (strcmp(tmpstr1,"Object") == 0) {
                                    C0 = (float)atof(tmpstr3); /* intensity */
                                    x0 = (float)atof(tmpstr4); /* x0 position */
                                    y0 = (float)atof(tmpstr5); /* y0 position */
                                    a = (float)atof(tmpstr6);  /* a - size object */
                                    b = (float)atof(tmpstr7);  /* b - size object */
                                    psi_gr1 = (float)atof(tmpstr8); /* rotation angle 1 */
                                }
                                else {
                                    /* missing 'Object' keyword */
                                    break; }
                                /* note: x0/y0 deliberately swapped for the python
                                 * axis convention */
                                TomoP2DObject_core(A, N, tmpstr2, C0, y0, x0, a, b, psi_gr1, 0); /* python */
                            }
                        }
                        else {
                            /**************************************************/
                            /* Temporal 2D+time model: each component has a
                             * start ("Object") and end ("Endvar") state that
                             * are linearly interpolated over `steps` frames. */
                            float C1 = 0.0f, x1 = 0.0f, y1 = 0.0f, a1 = 0.0f, b1 = 0.0f, psi_gr1_1 = 0.0f;
                            for(ii=0; ii<Components; ii++) {
                                if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %15s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr2, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8);
                                else {
                                    /* unexpected end of file in the objects loop */
                                    break; }
                                if (strcmp(tmpstr1,"Object") == 0) {
                                    C0 = (float)atof(tmpstr3); /* intensity */
                                    x0 = (float)atof(tmpstr4); /* x0 position */
                                    y0 = (float)atof(tmpstr5); /* y0 position */
                                    a = (float)atof(tmpstr6);  /* a - size object */
                                    b = (float)atof(tmpstr7);  /* b - size object */
                                    psi_gr1 = (float)atof(tmpstr8); /* rotation angle 1 */
                                }
                                else {
                                    /* missing 'Object' keyword */
                                    break; }
                                /* read the matching Endvar parameters */
                                if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8);
                                else {
                                    /* unexpected end of file in the Endvar loop */
                                    break; }
                                if (strcmp(tmpstr1,"Endvar") == 0) {
                                    C1 = (float)atof(tmpstr3); /* intensity */
                                    x1 = (float)atof(tmpstr4); /* x0 position */
                                    y1 = (float)atof(tmpstr5); /* y0 position */
                                    a1 = (float)atof(tmpstr6); /* a - size object */
                                    b1 = (float)atof(tmpstr7); /* b - size object */
                                    psi_gr1_1 = (float)atof(tmpstr8); /* rotation angle 1 */
                                }
                                else {
                                    printf("%s\n", "Cannot find 'Endvar' string in parameters file");
                                    break; }
                                /* now we know the initial parameters of the object and the final ones.
                                 * We linearly extrapolate to establish steps and coordinates. */
                                /* full distance between start and end points */
                                float distance = sqrtf(pow((x1 - x0),2) + pow((y1 - y0),2));
                                float d_dist = distance/(steps-1); /* a step over line */
                                float C_step = (C1 - C0)/(steps-1);
                                float a_step = (a1 - a)/(steps-1);
                                float b_step = (b1 - b)/(steps-1);
                                float phi_rot_step = (psi_gr1_1 - psi_gr1)/(steps-1);
                                int tt;
                                float x_t, y_t, a_t, b_t, C_t, phi_t, d_step;
                                /* initialize with the start state */
                                x_t = x0; y_t = y0; a_t = a; b_t = b; C_t = C0; phi_t = psi_gr1; d_step = d_dist;
                                /* loop over time frames */
                                for(tt=0; tt < steps; tt++) {
                                    TomoP2DObject_core(A, N, tmpstr2, C_t, x_t, -y_t, a_t, b_t, phi_t, tt); /* python */
                                    /* advance the object along the line */
                                    if (distance != 0.0f) {
                                        float t = d_step/distance;
                                        x_t = (1-t)*x0 + t*x1;
                                        y_t = (1-t)*y0 + t*y1; }
                                    else {
                                        x_t = x0;
                                        y_t = y0; }
                                    d_step += d_dist;
                                    a_t += a_step;
                                    b_t += b_step;
                                    C_t += C_step;
                                    phi_t += phi_rot_step;
                                } /* time steps */
                            } /* components loop */
                        }
                        counter++;
                    }
                }
            }
        }
    }
    fclose(fp);
    return *A;
}
|
dist_array.h | /* -------------------------------------------------------------------------------
* Tomocam Copyright (c) 2018
*
* The Regents of the University of California, through Lawrence Berkeley
*National Laboratory (subject to receipt of any required approvals from the
*U.S. Dept. of Energy). All rights reserved.
*
* If you have questions about your rights to use or distribute this software,
* please contact Berkeley Lab's Innovation & Partnerships Office at
*IPO@lbl.gov.
*
* NOTICE. This Software was developed under funding from the U.S. Department of
* Energy and the U.S. Government consequently retains certain rights. As such,
*the U.S. Government has been granted for itself and others acting on its
*behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
*to reproduce, distribute copies to the public, prepare derivative works, and
* perform publicly and display publicly, and to permit other to do so.
*---------------------------------------------------------------------------------
*/
#ifndef TOMOCAM_DISTARRAY__H
#define TOMOCAM_DISTARRAY__H
#include <vector>
#include <fstream>
#include "types.h"
#include "common.h"
namespace tomocam {
/* A non-owning view over a contiguous 3-D chunk of a larger array.
 * dims_ = [slices (x), rows (y), columns (z)]; halo_ holds the number of
 * ghost slices at each end. */
template <typename T>
class Partition {
  private:
    dim3_t dims_;    ///< [slices, rows, columns] of this partition
    uint64_t size_;  ///< total number of elements
    T *first_;       ///< pointer to the first element (not owned)
    int halo_[2];    ///< ghost-slice counts at the low/high end

  public:
    Partition(dim3_t d, T *pos) : dims_(d), first_(pos) {
        size_ = static_cast<uint64_t>(dims_.z) * dims_.y * dims_.x;
        halo_[0] = 0;
        halo_[1] = 0;
    }
    Partition(dim3_t d, T *pos, int *h) : dims_(d), first_(pos) {
        size_ = static_cast<uint64_t>(dims_.z) * dims_.y * dims_.x;
        halo_[0] = h[0];
        halo_[1] = h[1];
    }

    dim3_t dims() const { return dims_; }
    uint64_t size() const { return size_; }
    size_t bytes() const { return size_ * sizeof(T); }
    int *halo() { return halo_; }
    T *begin() { return first_; }
    /// Pointer to slice i. Widen to 64-bit before multiplying so large
    /// volumes cannot overflow int arithmetic (the original multiplied
    /// in int first).
    T *slice(int i) { return first_ + static_cast<uint64_t>(i) * dims_.y * dims_.z; }

    // create sub-partions
    std::vector<Partition<T>> sub_partitions(int);
    std::vector<Partition<T>> sub_partitions(int, int);
};
/* Host-side 3-D array [slices, rows, columns] with OpenMP-parallel
 * element-wise helpers. Owns its buffer unless owns_buffer_ is false. */
template <typename T>
class DArray {
  private:
    bool owns_buffer_; ///< Don't free buffer if not-owned
    dim3_t dims_;      ///< [Slices, Rows, Colums]
    uint64_t size_;    ///< Size of the alloated array
    T *buffer_;        ///< Pointer to data buffer

    /// Flattened global index. Widen to 64-bit BEFORE the first multiply:
    /// the original computed i*dims_.y in (signed) int, which can
    /// overflow for large volumes.
    uint64_t idx_(int i, int j, int k) {
        return (static_cast<uint64_t>(i) * dims_.y * dims_.z +
                static_cast<uint64_t>(j) * dims_.z + k);
    }

  public:
    DArray(dim3_t);
    DArray(np_array_t<T>);
    ~DArray();

    // copy and move
    DArray(const DArray &);
    DArray& operator=(const DArray &);
    DArray(DArray &&);
    DArray& operator=(DArray &&);

    // setup partitioning of array along slowest dimension
    std::vector<Partition<T>> create_partitions(int);
    // create partitionng along slowest dimension with halo
    std::vector<Partition<T>> create_partitions(int, int);

    /// Copy size_ values into the array (caller guarantees the length).
    void init(T *values) {
        #pragma omp parallel for
        for (uint64_t i = 0; i < size_; i++)
            buffer_[i] = values[i];
    }

    /// Fill the array with a constant.
    void init(T v) {
        #pragma omp parallel for
        for (uint64_t i = 0; i < size_; i++)
            buffer_[i] = v;
    }

    /// Euclidean (L2) norm of the whole buffer.
    T norm() const {
        T v = 0;
        #pragma omp parallel for reduction( + : v)
        for (uint64_t i = 0; i < size_; i++)
            v += buffer_[i] * buffer_[i];
        return std::sqrt(v);
    }

    /// Sum of all elements.
    T sum() const {
        T v = 0;
        #pragma omp parallel for reduction( + : v)
        for (uint64_t i = 0; i < size_; i++)
            v += buffer_[i];
        return v;
    }

    // NOTE(review): the +/-1E10 sentinels assume |values| < 1E10 — confirm,
    // or switch to std::numeric_limits<T>::lowest()/max().
    T max() const {
        T v = -1E10;
        #pragma omp parallel for reduction(max : v)
        for (uint64_t i = 0; i < size_; i++)
            if (buffer_[i] > v)
                v = buffer_[i];
        return v;
    }
    T min() const {
        T v = 1E10;
        #pragma omp parallel for reduction(min : v)
        for (uint64_t i = 0; i < size_; i++)
            if (buffer_[i] < v)
                v = buffer_[i];
        return v;
    }

    /// Element-wise addition (sizes assumed equal).
    DArray<T> operator+(const DArray<T> & rhs) {
        DArray<T> rv(dims_);
        #pragma omp parallel for
        for (uint64_t i = 0; i < size_; i++)
            rv.buffer_[i] = buffer_[i] + rhs.buffer_[i];
        return rv;
    }

    /// Element-wise subtraction (sizes assumed equal).
    DArray<T> operator-(const DArray<T> & rhs) {
        DArray<T> rv(dims_);
        #pragma omp parallel for
        for (uint64_t i = 0; i < size_; i++)
            rv.buffer_[i] = buffer_[i] - rhs.buffer_[i];
        return rv;
    }

    /// In-place element-wise addition.
    DArray<T> & operator+=(const DArray<T> &rhs) {
        #pragma omp parallel for
        for (uint64_t i = 0; i < size_; i++)
            buffer_[i] += rhs.buffer_[i];
        return *this;
    }

    /// Dump the raw buffer to a binary file.
    void to_file(const char * filename) {
        std::ofstream fout(filename, std::ios::binary);
        fout.write((char *) buffer_, this->bytes());
        fout.close();
    }

    /// dimensions of the array
    dim3_t dims() const { return dims_; }
    uint64_t slices() const { return dims_.x; }
    uint64_t rows() const { return dims_.y; }
    uint64_t cols() const { return dims_.z; }
    uint64_t size() const { return size_; }
    size_t bytes() const { return size_ * sizeof(T); }

    // indexing
    T &operator[](uint64_t i) { return buffer_[i]; }
    T &operator()(int i, int j, int k) { return buffer_[idx_(i, j, k)]; }

    /// Zero-padded read: returns 0 for any out-of-bounds coordinate.
    T padded(int i, int j, int k) {
        if ((i < 0) || (i >= dims_.x) ||
            (j < 0) || (j >= dims_.y) ||
            (k < 0) || (k >= dims_.z))
            return 0;
        else
            return buffer_[idx_(i, j, k)];
    }

    /// Returns pointer to N-th slice (64-bit arithmetic, see idx_).
    T *slice(int n) { return (buffer_ + static_cast<uint64_t>(n) * dims_.y * dims_.z); }
    /// Expose the alloaated memoy pointer
    T *data() { return buffer_; }
};
} // namespace tomocam
#include "dist_array.cpp"
#endif // TOMOCAM_DISTARRAY__H
|
rose_v1_matrixmultiply2.c | /*
Naive matrix-matrix multiplication(mmm)
By C. Liao
*/
#define N 1000
#define M 1000
#define K 1000
#include <omp.h>
int i;
int j;
int k;
double a[1000][1000];
double b[1000][1000];
double c[1000][1000];
/* Naive matrix-matrix multiply: c += a * b over the fixed 1000^3 problem
 * (i-k-j loop order for row-contiguous access to b and c).
 * The redundant nested `#pragma omp parallel for` on the j loop was
 * removed: nested parallelism is disabled by default, and when enabled it
 * only oversubscribes threads — the numeric result is unchanged because
 * each c[i][j] is owned by exactly one outer-loop iteration. */
int mmm()
{
#pragma omp parallel for private (i,j,k)
  for (i = 0; i <= 999; i += 1) {
    for (k = 0; k <= 999; k += 1) {
      for (j = 0; j <= 999; j += 1) {
        c[i][j] = c[i][j] + a[i][k] * b[k][j];
      }
    }
  }
  return 0;
}
|
softmax-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda_utils.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
/* Forward op for softmax: given a max-shifted logit `a` and the partition
 * sum `b`, returns exp(a)/b. Overloads pick expf/exp by input precision. */
struct softmax_fwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float a, AType b) {
    return AType(expf(a)/b);
  }

  template<typename AType>
  MSHADOW_XINLINE static AType Map(double a, AType b) {
    return AType(exp(a)/b);
  }
};
/* Forward op for log-softmax: given a max-shifted logit `a` and the
 * partition sum `b`, returns a - log(b). */
struct log_softmax_fwd {
  template<typename DType>
  MSHADOW_XINLINE static float Map(DType a, float b) {
    return a - logf(b);
  }

  template<typename DType>
  MSHADOW_XINLINE static double Map(DType a, double b) {
    return a - log(b);
  }
};
/* CPU softmax/log-softmax along `axis` using the numerically stable
 * max-shift formulation. `length`, when non-null, gives a per-vector
 * valid length: entries beyond it are written as 0. `negate` flips the
 * input sign; `temperature` divides the shifted logits (the ==1.0 branch
 * skips the division entirely). Parallelized over the N independent
 * vectors with OpenMP. */
template<typename OP, bool negate, typename AType, typename DType, typename OType,
         typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const DType temperature) {
  index_t M = shape[axis];      // reduction length along `axis`
  index_t N = shape.Size()/M;   // number of independent vectors
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];    // element stride along `axis`

  if (length == nullptr) {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);

      // pass 1: running maximum for the stability shift
      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < M; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }

      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        // pass 2: partition sum; pass 3: normalized outputs
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  } else {
    // masked variant: only the first length[i] entries participate,
    // the tail [len, M) is zero-filled
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t len = static_cast<index_t>(length[i]);
      index_t base = unravel_dot(i, sshape, stride);

      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < len; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }
      for (index_t j = len; j < M; ++j) {
        out[base + j*sa] = OType(0.0f);
      }

      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  }
}
/* Backward op for softmax: dL/dx = out * (ograd - sum), where `sum` is
 * the per-vector reduction computed by the caller. */
struct softmax_bwd {
  template<typename DType, typename AType>
  MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
    return AType(out * (ograd - sum));
  }
};
/* Backward op for log-softmax: dL/dx = ograd - exp(out) * sum.
 * Overloads pick expf/exp by precision. */
struct log_softmax_bwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
    return AType(ograd - expf(out)*sum);
  }

  template<typename AType>
  MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
    return AType(ograd - exp(out)*sum);
  }
};
/* CPU backward pass for softmax/log-softmax along `axis`. OP1 computes the
 * per-vector reduction term, OP2 the per-element gradient; KERNEL_ASSIGN
 * honors the write request type Req. With a non-null `length`, only the
 * first length[i] entries contribute and tail gradients are 0. The
 * temperature==1.0 branch skips per-element division. */
template<typename OP1, typename OP2, int Req, bool negate,
         typename AType, typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape,
                        int axis, const DType temperature) {
  index_t M = shape[axis];      // reduction length along `axis`
  index_t N = shape.Size()/M;   // number of independent vectors
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];    // element stride along `axis`

  if (length != nullptr) {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      index_t len = static_cast<index_t>(length[i]);

      // reduction over the valid prefix only
      AType sum = AType(0);
      for (index_t j = 0; j < len; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }

      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  } else {
    // unmasked variant: all M entries participate
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);

      AType sum = AType(0);
      for (index_t j = 0; j < M; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }

      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  }
}
#ifdef __CUDACC__
/* Generic CUDA softmax kernel: one thread block per output vector
 * (blockIdx.x selects the vector), 2^x_bits threads cooperate via shared
 * memory to reduce the max and the exp-sum, then each thread writes its
 * strided share of the outputs. Entries past `length[blockIdx.x]` (when
 * length is given) are written as 0. */
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
                                       index_t M, int axis, Shape<ndim> sshape,
                                       Shape<ndim> stride, const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);

  // block-wide maximum of the (possibly negated) inputs
  red::maximum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::maximum, x_bits>(smem);
  __syncthreads();
  DType smax = smem[0];
  __syncthreads();

  // block-wide sum of exp((val - max)/temperature)
  red::sum::SetInitValue(smem[x]);
  DType val;
  for (index_t i = x; i < len; i += x_size) {
    val = negate ? -in[base + i*sa]:in[base + i*sa];
    smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();

  // write normalized outputs; masked tail entries become 0
  for (index_t i = x; i < M; i += x_size) {
    val = negate ? -in[base + i*sa] : in[base + i*sa];
    out[base + i*sa] =
      (i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
  }
}
/* Thread-block size shared by the optimized stride-1 softmax kernels. */
const int softmax_threads_per_block = 512;

/* Optimized CUDA softmax for contiguous (stride-1) reduction axes: several
 * rows share one block, each row is staged through vectorized LType loads
 * into 20 kB of shared memory, and a partial-warp + warp-shuffle reduction
 * computes the max and exp-sum. Requires sizeof(LType) >= sizeof(DType)
 * and M small enough to fit the shared-memory staging buffer (checked by
 * the launcher). */
template<typename OP, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
                                               const index_t M, const double temperature,
                                               const int rows_per_block, const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);

  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;

  // stage this row into shared memory with wide loads
  const LType* in_aligned = reinterpret_cast<const LType*>(in);
  size_t base = my_row * row_length;
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
  }
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
  __syncthreads();

  // per-thread max, then tree reduction in scratch, then warp shuffle
  DType my_max_value;
  red::maximum::SetInitValue(my_max_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
  }
  scratch[threadIdx.x] = my_max_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return ::max(x, y); });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // same two-stage reduction for the exp-sum
  AType my_sum;
  red::sum::SetInitValue(my_sum);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  scratch[threadIdx.x] = my_sum;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] += scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return x + y;});
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // normalize in shared memory, then stream back out with wide stores
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
                         DType(0.0f);
  }
  __syncthreads();
  LType* out_aligned = reinterpret_cast<LType*>(out);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
  }
}
// Launches the softmax forward computation on GPU.
// Picks the optimized stride-1 kernel (rows cached in shared memory and read
// through wide LType loads) when the reduction axis is contiguous, one row
// fits into the 20 kB persistent-storage budget, and input/output dtypes
// match; otherwise falls back to the generic strided kernel.
template<typename OP, bool negate, typename AType, typename DType, typename OType,
         typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;  // 128 threads per block for the generic kernel
  index_t M = shape[axis];         // length of the softmax axis
  index_t N = shape.Size()/M;      // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    // Widest vector type that evenly covers a row of M elements.
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
                             sizeof(DType) / sizeof(LType),
                             softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      // The stride-1 kernel requires sizeof(LType) >= sizeof(DType); see the
      // entries_per_load comment inside the kernel.
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_compute_kernel<OP, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
  } else {
    // Generic path: one block per row, strided element accesses.
    softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        in, out, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
  }
}
// Backward softmax kernel for the contiguous (stride-1) case.
// Each block handles rows_per_block rows; a row's `out` and `ograd` values are
// staged in shared memory (persistent_storage) via wide LType loads, the
// dot-product-style reduction OP1 is computed cooperatively, and the gradient
// OP2 is written back through the same shared buffer.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
                                            DType *igrad, const IType *length,
                                            const index_t M,
                                            const double temperature,
                                            const int rows_per_block,
                                            const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  // 20 kB shared buffer holding out followed by ograd for each local row.
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  // len masks the softmax: entries at index >= len get zero gradient.
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* out_aligned = reinterpret_cast<const LType*>(out);
  const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
  size_t base = my_row * row_length;
  // Stage out (first half) and ograd (second half) into shared memory.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
    persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
  }
  // row[0..M) is out; row[M..2M) is ograd (same buffer viewed as DType).
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
  __syncthreads();
  // Per-thread partial reduction of OP1(ograd, out) over the valid entries.
  AType my_sum_value;
  red::sum::SetInitValue(my_sum_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_sum_value += OP1::Map(row[i + M], row[i]);
  }
  scratch[threadIdx.x] = my_sum_value;
  __syncthreads();
  // Tree reduction in shared memory down to one warp, ...
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  // ... then a warp-level shuffle reduction.
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return x + y; });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  // Broadcast the row sum from the row's first thread slot.
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Compute the gradient in place in shared memory; masked entries become 0.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val =
      negate ?
      -OP2::Map(row[i + M], row[i], ssum) :
      OP2::Map(row[i + M], row[i], ssum);
    row[i] = (i < len) ? DType(val / static_cast<DType>(temperature)) :
                         DType(0.0f);
    if (Req == kAddTo) {
      row[i] += igrad[my_row * M + i];
    }
  }
  __syncthreads();
  // Write the finished row back to global memory with wide stores.
  LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
  }
}
// Generic backward softmax kernel (arbitrary stride along the softmax axis).
// One block per output row: reduce OP1(ograd, out) into ssum with a shared
// memory tree reduction, then write OP2(ograd, out, ssum) / temperature,
// zeroing entries masked out by `length`.
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
                                    const IType *length, index_t M, int axis,
                                    Shape<ndim> sshape, Shape<ndim> stride,
                                    const double temperature) {
  const unsigned x_size = 1 << x_bits;  // threads per block
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];  // element stride along the softmax axis
  // Base offset of this block's row in the flattened tensor.
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M;
  red::sum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];  // row-wide reduction result
  __syncthreads();
  DType final_result;
  for (index_t i = x; i < M; i += x_size) {
    final_result =
      negate ?
      -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) :
      OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum);
    // Entries beyond the per-row length get zero gradient.
    final_result = (i < len) ? final_result : DType(0.0f);
    KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature));
  }
}
// Launches the softmax backward computation on GPU. Mirrors Softmax(): uses
// the optimized stride-1 kernel when the axis is contiguous and both out and
// ograd rows fit the shared-memory budget; otherwise uses the generic kernel.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape, int axis,
                        const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;
  index_t M = shape[axis];     // softmax-axis length
  index_t N = shape.Size()/M;  // number of rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  // Need to store both out and ograd, so M can be only half compared to
  // forward pass.
  const size_t max_opt_M = 20 * 1024 / DSize / 2;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
                             sizeof(DType) / sizeof(LType),
                             softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      // Stride-1 kernel precondition; see entries_per_load in the kernel.
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          out, ograd, igrad, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
  } else {
    softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        out, ograd, igrad, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
  }
}
#endif
} // namespace mxnet_op
// Parameters shared by the softmax family of operators (forward and backward).
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
  int axis;                            // axis along which softmax is computed
  dmlc::optional<double> temperature;  // optional temperature; 1.0 when unset
  dmlc::optional<int> dtype;           // optional output-dtype override
  dmlc::optional<bool> use_length;     // treat second input as a length mask
  DMLC_DECLARE_PARAMETER(SoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
    .describe("The axis along which to compute softmax.");
    DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
    .describe("Temperature parameter in softmax");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(dmlc::optional<int>())
    .describe("DType of the output in case this can't be inferred. "
              "Defaults to the same as input's dtype if not defined (dtype=None).");
    DMLC_DECLARE_FIELD(use_length)
    .set_default(dmlc::optional<bool>(false))
    .describe("Whether to use the length input as a mask over the data input.");
  }
  // Equality over all fields so the param can serve as a hash-map key
  // (see the std::hash specialization at the end of this file).
  bool operator==(const SoftmaxParam& other) const {
    return this->axis == other.axis &&
           this->temperature == other.temperature &&
           this->dtype == other.dtype &&
           this->use_length == other.use_length;
  }
};
// True when the operator was configured with an explicit output dtype
// (i.e. dtype is set and is a valid type flag).
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
  const auto& p = nnvm::get<SoftmaxParam>(attrs.parsed);
  if (!p.dtype.has_value()) {
    return false;
  }
  return p.dtype.value() != -1;
}
// True when the length input should be used as a mask over the data input.
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
  return nnvm::get<SoftmaxParam>(attrs.parsed).use_length.value();
}
// Dtype inference for softmax forward.
// With a dtype override the output takes that dtype and the input is assigned
// the same type; otherwise input and output must share one dtype.
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int>* in_attrs,
                                 std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs)) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
    type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
    return true;
  } else {
    // The optional length input is deliberately excluded from elemwise
    // inference - only data and output need to agree.
    std::vector<int> tmp = {in_attrs->at(0)};
    return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
  }
}
// Shape inference for softmax forward.
// When use_length is set, the length input's shape is the data shape with the
// softmax axis removed (scalar data keeps a 1-D shape of size 1).
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
  if (param.use_length.value()) {
    mxnet::TShape& dshape = in_attrs->at(0);
    // Length shape = data shape minus the softmax axis.
    mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
    int j = 0;
    int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
    for (int i = 0; i < dshape.ndim(); ++i) {
      if (i != axis) {
        tmp_shape[j++] = dshape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
  }
  // Data and output share the same shape.
  mxnet::ShapeVector tmp = {in_attrs->at(0)};
  return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}
// Shape inference for softmax backward.
// Input layout depends on the variant: (ograd, data, [length,] output) when
// dtype is overridden or length masking is used, else (ograd, output).
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                      mxnet::ShapeVector *in_attrs,
                                      mxnet::ShapeVector *out_attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      // ograd, data, and output (indices 0, 1, 3) share the data shape ...
      mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
      mxnet::ShapeVector dgrad = {out_attrs->at(0)};
      bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
      SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
      // ... while the length input (index 2) matches the length gradient.
      mxnet::ShapeVector length = {in_attrs->at(2)};
      mxnet::ShapeVector lgrad = {out_attrs->at(1)};
      res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
      SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
      return res;
    } else {
      return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
    }
  } else {
    return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
  }
}
// Dtype inference for softmax backward.
// Inputs are (ograd, data, [length,] output) in the extended variants, or
// (ograd, output) otherwise; the data gradient takes the data dtype.
// Fix: the original unconditionally read (*out_attrs)[1], which is
// out-of-bounds when the dtype override is set but use_length is false
// (out_attrs then has exactly one element, per the CHECK below).
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
    int in_dtype = (*in_attrs)[1];
    int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
    if (softmax_use_length(attrs)) {
      TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
      // Only index out_attrs[1] in the use_length case.
      return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
             (*out_attrs)[1] != -1 && (*in_attrs)[1] != -1;
    }
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
           (*in_attrs)[1] != -1;
  } else {
    CHECK_EQ(in_attrs->size(), 2U);
    int out_dtype = (*in_attrs)[1];
    TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
  }
}
// In-place reuse options for softmax backward: pairs of
// (input index, output index) whose storage may be shared.
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  using OptVec = std::vector<std::pair<int, int> >;
  const bool extended = softmax_has_dtype_override(attrs) || softmax_use_length(attrs);
  if (!extended) {
    return OptVec{{0, 0}, {1, 0}};
  }
  if (softmax_use_length(attrs)) {
    return OptVec{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
  }
  return OptVec{{0, 0}, {1, 0}, {2, 0}};
}
// Number of inputs consumed by softmax backward for the given attributes:
// 4 with length masking, 3 with a dtype override, 2 otherwise.
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
  const bool use_len = softmax_use_length(attrs);
  if (softmax_has_dtype_override(attrs) || use_len) {
    return use_len ? 4 : 3;
  }
  return 2;
}
// Names of the softmax backward inputs, in the same order as
// SoftmaxGradOpNumInputs counts them.
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
  if (!softmax_has_dtype_override(attrs) && !softmax_use_length(attrs)) {
    return {"ograd", "output"};
  }
  if (softmax_use_length(attrs)) {
    return {"ograd", "data", "length", "output"};
  }
  return {"ograd", "data", "output"};
}
// Gradient-node builder for softmax operators. When the backward pass needs
// the original input as well (dtype override or length masking), both input
// and output are recorded; otherwise only the forward output is captured.
struct SoftmaxFGradient {
  const char *op_name;  // name of the backward op to instantiate
  std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
                                          const std::vector<nnvm::NodeEntry>& ograds) const {
    if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
      return ElemwiseGradUseInOut {op_name}(n, ograds);
    } else {
      return ElemwiseGradUseOut {op_name}(n, ograds);
    }
  }
};
// Forward compute entry point for softmax-family operators.
// Dispatches over input/output dtypes, optional length-mask dtype and the
// accumulation type (safe accumulation uses a wider AType), then calls the
// device-specific Softmax() with a 2-D or 3-D compacted shape.
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp) return;
  CHECK_NE(req[0], kAddTo);  // softmax forward only supports overwrite
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
                             param.temperature.value() : 1.0;
  // Collapse dimensions around the softmax axis to at most 3-D.
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
                    "See https://mxnet.apache.org/api/faq/env_var "
                    "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
      int type = kInt32;  // default mask dtype when no mask is given
      if (param.use_length.value()) {
        CHECK(inputs.size() > 1)
          << "Mask needs to be provided when using softmax with use_length=True.";
        type = inputs[1].type_flag_;
      }
      MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
        IType* mask_ptr = nullptr;
        if (param.use_length.value()) {
          mask_ptr = inputs[1].dptr<IType>();
        }
        if (safe_acc) {
          // Accumulate in the wider AType for numerical safety.
          if (shape.ndim() == 2) {
            Softmax<OP, negate, AType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
                axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, AType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
                axis, static_cast<DType>(temperature));
          }
        } else {
          // Accumulate in the input dtype.
          if (shape.ndim() == 2) {
            Softmax<OP, negate, DType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
                axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, DType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
                axis, static_cast<DType>(temperature));
          }
        }
      });
    });
  });
}
// Backward compute entry point for softmax-family operators.
// Zeroes the length gradient (if any), resolves which input slot holds the
// forward output for this variant, then dispatches over dtypes / req /
// accumulation type to the device-specific SoftmaxGrad().
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (softmax_use_length(attrs)) {
    // The length input carries no gradient; write zeros if requested.
    MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
      if (req[1] != kNullOp) {
        mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
          ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
      }
    });
  }
  if (req[0] == kNullOp) return;
  const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
                             param.temperature.value() : 1.0;
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  // Index of the forward output among the inputs: 1 (base), 2 (dtype
  // override), or 3 (length masking) - matches SoftmaxGradOpInputNames.
  int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
  out_idx = softmax_use_length(attrs) ? 3 : out_idx;
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
          IType * length_ptr = nullptr;
          if (softmax_use_length(attrs)) {
            length_ptr = inputs[2].dptr<IType>();
          }
          if (safe_acc) {
            // Accumulate in the wider AType for numerical safety.
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<2>(), axis,
                  static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<3>(), axis,
                  static_cast<DType>(temperature));
            }
          } else {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<2>(), axis,
                  static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<3>(), axis,
                  static_cast<DType>(temperature));
            }
          }
        });
      });
    });
  });
}
} // namespace op
} // namespace mxnet
namespace std {
// Hash specialization so SoftmaxParam can be used as an unordered-map key.
template<>
struct hash<mxnet::op::SoftmaxParam> {
  // Fix: operator() must be const - standard unordered containers invoke the
  // hasher through a const reference, so the original non-const call operator
  // fails to compile in that context.
  size_t operator()(const mxnet::op::SoftmaxParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    ret = dmlc::HashCombine(ret, val.temperature);
    ret = dmlc::HashCombine(ret, val.dtype);
    ret = dmlc::HashCombine(ret, val.use_length);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
GB_unop__isinf_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__isinf_bool_fp64
// op(A') function: GB_unop_tran__isinf_bool_fp64
// C type: bool
// A type: double
// cast: double cij = (aij)
// unaryop: cij = isinf (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isinf (x) ;
// casting
#define GB_CAST(z, aij) \
double z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (aij) ; \
Cx [pC] = isinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = isinf (Ax [p]) for all anz entries, in parallel.
// Aliasing Cx == Ax is safe because each entry is fully read before the
// corresponding write. (Auto-generated file: only comments added here.)
GrB_Info GB_unop_apply__isinf_bool_fp64
(
    bool *Cx,       // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        double z = (aij) ;
        Cx [p] = isinf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isinf (cast (A')): transpose and apply, driven by the shared template
// in GB_unop_transpose.c, which expands the GB_* macros defined above.
// (Auto-generated file: only comments added here.)
GrB_Info GB_unop_tran__isinf_bool_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dnnl_quantize_asym-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file dnnl_quantize_asym-inl.h
* \brief implementation of asymmetric quantize operation using DNNL
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_ASYM_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_ASYM_INL_H_
#if MXNET_USE_ONEDNN == 1
#include <memory>
#include <vector>
#include "operator/nn/dnnl/dnnl_base-inl.h"
#include "operator/quantization/quantize_asym-inl.h"
namespace mxnet {
namespace op {
// Asymmetric quantization (out = in * scale + shift, producing uint8)
// implemented as a cached DNNL reorder primitive with rnn-data qparams.
class DNNLQuantizeAsymOp {
 public:
  explicit DNNLQuantizeAsymOp(const nnvm::NodeAttrs& attrs)
      : param_(nnvm::get<QuantizeAsymParam>(attrs.parsed)) {}

  // outputs: [0] quantized data, [1] scale, [2] shift.
  void Forward(const OpContext& ctx,
               const std::vector<NDArray>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<NDArray>& outputs);

 private:
  QuantizeAsymParam param_;
  bool initialized_{false};     // true once the reorder primitive is cached
  float cached_scale_{0.f};     // scale the cached primitive was built with
  float cached_shift_{0.f};     // shift the cached primitive was built with
  dnnl::memory::desc o_desc_;   // cached output memory descriptor
  dnnl_args_map_t args_;        // primitive argument map reused across calls
  std::shared_ptr<dnnl::reorder> fwd_pd_;  // cached reorder primitive
};
// Quantizes inputs[0] into outputs[0] (uint8) and writes the used scale and
// shift into outputs[1] / outputs[2]. uint8 input passes through unchanged;
// fp32 input derives scale/shift from calibration ranges or from the data
// min/max, then runs a cached DNNL reorder.
void DNNLQuantizeAsymOp::Forward(const OpContext& ctx,
                                 const std::vector<NDArray>& inputs,
                                 const std::vector<OpReqType>& req,
                                 const std::vector<NDArray>& outputs) {
  using mshadow::red::limits::MaxValue;
  using mshadow::red::limits::MinValue;
  NDArray in_buffer = inputs[0];
  float scale = 0.f;
  float shift = 0.f;

  // Pass through quantized data
  if (inputs[0].dtype() == mshadow::kUint8) {
    // Already uint8: identity quantization (scale 1, shift 0); just copy.
    *outputs[1].data().dptr<float>() = 1;
    *outputs[2].data().dptr<float>() = 0;
    if (req[0] != kWriteInplace) {
      const_cast<NDArray&>(outputs[0]).CopyFrom(*inputs[0].GetDNNLData());
      DNNLStream::Get()->Submit();
    }
  } else {
    in_buffer = inputs[0].Reorder2Default();
    const dnnl::memory* i_mem = in_buffer.GetDNNLData();
    float* in_ptr = in_buffer.data().dptr<float>();
    const int nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (inputs[0].dtype() == mshadow::kInt8) {
      // int8 -> uint8: report scale 1 / shift 128 and offset the data by 128.
      *outputs[1].data().dptr<float>() = 1;
      *outputs[2].data().dptr<float>() = 128;
#pragma omp parallel for num_threads(nthreads)
      for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); ++i) {
        in_ptr[i] += 128.0f;
      }
    } else if (inputs[0].dtype() == mshadow::kFloat32) {
      if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
        // Calibrated: derive scale/shift from the recorded value range.
        scale =
            MaxValue<uint8_t>() / (param_.max_calib_range.value() - param_.min_calib_range.value());
        shift = MaxValue<uint8_t>() - param_.max_calib_range.value() * scale;
      } else {
        // Uncalibrated: compute data min/max with a per-thread reduction.
        float data_min = mshadow::red::limits::MaxValue<float>();
        float data_max = mshadow::red::limits::MinValue<float>();
        std::vector<float> data_maxs(nthreads, data_max);
        std::vector<float> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
        for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
          int tid = omp_get_thread_num();
          if (in_ptr[i] > data_maxs[tid])
            data_maxs[tid] = in_ptr[i];
          if (in_ptr[i] < data_mins[tid])
            data_mins[tid] = in_ptr[i];
        }
        for (index_t i = 0; i < nthreads; i++) {
          if (data_maxs[i] > data_max)
            data_max = data_maxs[i];
          if (data_mins[i] < data_min)
            data_min = data_mins[i];
        }
        scale = MaxValue<uint8_t>() / (data_max - data_min);
        shift = MaxValue<uint8_t>() - data_max * scale;
      }
      // Invalidate the cached primitive when the qparams change.
      if (initialized_ && (cached_scale_ != scale || cached_shift_ != shift))
        initialized_ = false;
    }
    // NOTE(review): in the kInt8 branch above, scale/shift are still 0 here,
    // so these stores overwrite the 1/128 written earlier and the reorder
    // below is built with qparams (0, 0). Confirm whether the int8 path
    // should set the local scale/shift variables instead. (The dispatcher
    // currently routes only fp32 3-D inputs here, so this path may be latent.)
    *outputs[1].data().dptr<float>() = scale;
    *outputs[2].data().dptr<float>() = shift;
    if (!initialized_) {
      cached_scale_ = scale;
      cached_shift_ = shift;
      dnnl::primitive_attr attr;
      // Reorder applies out = in * scale + shift.
      attr.set_rnn_data_qparams(scale, shift);
      const dnnl::engine& cpu_engine = mxnet::CpuEngine::Get()->get_engine();
      const dnnl::memory::desc& i_desc = i_mem->get_desc();
      o_desc_ = i_desc;
      o_desc_.data.data_type = get_dnnl_type_t(outputs[0].dtype());
      dnnl::reorder::primitive_desc reorder_pd(cpu_engine, i_desc, cpu_engine, o_desc_, attr);
      fwd_pd_ = std::make_shared<dnnl::reorder>(reorder_pd);
      initialized_ = true;
    }
    dnnl_output_t o_mem = CreateDNNLMem(outputs[0], o_desc_, req[0]);
    args_[DNNL_ARG_FROM] = *i_mem;
    args_[DNNL_ARG_TO] = *o_mem.second;
    DNNLStream::Get()->RegisterPrimArgs(*fwd_pd_, args_);
    CommitOutput(outputs[0], o_mem);
    DNNLStream::Get()->Submit();
  }
}
void DNNLQuantizeAsymForward(const OpStatePtr& state_ptr,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
if (inputs[0].shape().ndim() == 3 && inputs[0].dtype() == mshadow::kFloat32) {
DNNLQuantizeAsymOp& op = state_ptr.get_state<DNNLQuantizeAsymOp>();
op.Forward(ctx, inputs, req, outputs);
} else {
FallBackCompute(QuantizeAsymForward<cpu>, state_ptr, ctx, inputs, req, outputs);
}
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_ASYM_INL_H_
|
poisson_3d-a.pluto.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
/*
* N is the number of points
* T is the number of timesteps
*/
#ifdef HAS_DECLS
#include "decls.h"
#else
#define N 600L
#define T 600L
#endif
#define NUM_FP_OPS 15
/* Define our arrays */
// double A[2][N][N][N];
double total=0; double sum_err_sqr=0;
int chtotal=0;
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * Normalizes *y in place (the classic glibc-manual idiom) so that the
 * microsecond field of the result is certainly positive.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) {
  /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry any excess microseconds (more than one second) the other way. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* The subtraction is now safe field-by-field. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char * argv[]) {
long int t, i, j, k;
const int BASE = 1024;
long count=0;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0;
// double A[2][N][N][N];
double ****A = (double ****)malloc(2 * sizeof (double ***));
int l;
for (l = 0; l < 2; l++){
A[l] = (double ***) malloc(N * sizeof(double **));
for (i = 0; i < N; i++){
A[l][i] = (double **) malloc(N * sizeof(double *));
for (j = 0; j < N; j++)
A[l][i][j] = (double *) malloc(N * sizeof (double));
}
}
printf("Number of points = %ld\t|Number of timesteps = %ld\t", N, T);
/* Initialization */
srand(42); // seed with a constant value to verify results
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
}
}
}
#ifdef TIME
gettimeofday(&start, 0);
#endif
// #undef N
// #define N 150L
#undef T
#define T 300L
/* Copyright (C) 1991-2012 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* We do support the IEC 559 math functionality, real and complex. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((N >= 3) && (T >= 1)) {
for (t1=-1;t1<=T-1;t1++) {
lbp=ceild(t1,2);
ubp=floord(2*t1+N-1,4);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(ceild(t1-1,2),ceild(t1-1,4)),ceild(4*t2-N,4));t3<=min(min(floord(2*T+N-3,4),floord(t1+2*t2+N,4)),floord(2*t1+N+1,4));t3++) {
for (t4=max(max(max(ceild(t1-1023,1024),ceild(4*t2-N-2044,2048)),ceild(4*t3-N-2044,2048)),ceild(1023*t1-1046529,1048576));t4<=min(min(min(floord(4*t3+N,2048),floord(2*T+N-3,2048)),floord(t1+2*t2+N,2048)),floord(2*t1+N+1,2048));t4++) {
/* The six `if` blocks below are boundary-face specializations of the 27-point
   stencil, peeled off by CLooG for tiles that touch a domain edge. The bound
   arithmetic encodes the 4(t2) x 4(t3) x 2048(t4) tiling — machine-generated,
   do not hand-edit. Each mega-statement is one fused 27-point update. */
/* Boundary: k = N-2 face — writes A[0] from A[1] when the t4 tile reaches
   the upper k edge of the domain. */
if ((t1 <= min(floord(2048*t4-N+1,2),floord(4*t2+2048*t4-N-1,4))) && (t2 <= 512*t4-1) && (t3 <= 512*t4-1) && (t4 >= ceild(N-1,2048))) {
if ((N+1)%2 == 0) {
for (t6=max(4*t2,-4*t1+4*t2+4096*t4-2*N+1);t6<=min(4*t2+3,-4*t1+4*t2+4096*t4-2*N+4);t6++) {
for (t7=max(4*t3,2048*t4-N+3);t7<=4*t3+3;t7++) {
A[0][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)][(N-2)] = 2.666*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)][(N-2)] - (0.166*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)][(N-2)] + 0.166*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)][(N-2)] + 0.166*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)-1][(N-2)] + 0.166*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)+1][(N-2)] + 0.166*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)][(N-2)+1] + 0.166*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)][(N-2)-1])- (0.0833*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)-1][(N-2)] + 0.0833*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)-1][(N-2)] + 0.0833*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)+1][(N-2)] + 0.0833*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)+1][(N-2)] + 0.0833*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)][(N-2)-1] + 0.0833*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)][(N-2)-1] + 0.0833*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)-1][(N-2)-1] + 0.0833*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)+1][(N-2)-1] + 0.0833*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)][(N-2)+1] + 0.0833*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)][(N-2)+1] + 0.0833*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)-1][(N-2)+1] + 0.0833*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)+1][(N-2)+1]);;
}
}
}
}
/* Boundary: j = N-2 face — writes A[0] from A[1] when the t3 tile reaches
   the upper j edge. */
if ((t1 <= min(floord(4*t3-N+1,2),floord(4*t2+4*t3-N-1,4))) && (t2 <= t3-1) && (t3 >= ceild(N-1,4))) {
if ((N+1)%2 == 0) {
for (t6=max(4*t2,-4*t1+4*t2+8*t3-2*N+1);t6<=min(4*t2+3,-4*t1+4*t2+8*t3-2*N+4);t6++) {
for (t8=max(2048*t4,4*t3-N+3);t8<=min(4*t3,2048*t4+2047);t8++) {
A[0][(-4*t3+t6+N-2)][(N-2)][(-4*t3+t8+N-2)] = 2.666*A[1][(-4*t3+t6+N-2)][(N-2)][(-4*t3+t8+N-2)] - (0.166*A[1][(-4*t3+t6+N-2)-1][(N-2)][(-4*t3+t8+N-2)] + 0.166*A[1][(-4*t3+t6+N-2)+1][(N-2)][(-4*t3+t8+N-2)] + 0.166*A[1][(-4*t3+t6+N-2)][(N-2)-1][(-4*t3+t8+N-2)] + 0.166*A[1][(-4*t3+t6+N-2)][(N-2)+1][(-4*t3+t8+N-2)] + 0.166*A[1][(-4*t3+t6+N-2)][(N-2)][(-4*t3+t8+N-2)+1] + 0.166*A[1][(-4*t3+t6+N-2)][(N-2)][(-4*t3+t8+N-2)-1])- (0.0833*A[1][(-4*t3+t6+N-2)-1][(N-2)-1][(-4*t3+t8+N-2)] + 0.0833*A[1][(-4*t3+t6+N-2)+1][(N-2)-1][(-4*t3+t8+N-2)] + 0.0833*A[1][(-4*t3+t6+N-2)-1][(N-2)+1][(-4*t3+t8+N-2)] + 0.0833*A[1][(-4*t3+t6+N-2)+1][(N-2)+1][(-4*t3+t8+N-2)] + 0.0833*A[1][(-4*t3+t6+N-2)-1][(N-2)][(-4*t3+t8+N-2)-1] + 0.0833*A[1][(-4*t3+t6+N-2)+1][(N-2)][(-4*t3+t8+N-2)-1] + 0.0833*A[1][(-4*t3+t6+N-2)][(N-2)-1][(-4*t3+t8+N-2)-1] + 0.0833*A[1][(-4*t3+t6+N-2)][(N-2)+1][(-4*t3+t8+N-2)-1] + 0.0833*A[1][(-4*t3+t6+N-2)-1][(N-2)][(-4*t3+t8+N-2)+1] + 0.0833*A[1][(-4*t3+t6+N-2)+1][(N-2)][(-4*t3+t8+N-2)+1] + 0.0833*A[1][(-4*t3+t6+N-2)][(N-2)-1][(-4*t3+t8+N-2)+1] + 0.0833*A[1][(-4*t3+t6+N-2)][(N-2)+1][(-4*t3+t8+N-2)+1]);;
}
}
}
}
/* Boundary: i = N-2 face — writes A[0] from A[1]; the equality
   2*t1 == 4*t2-N+1 pins this t2 tile to the upper i edge. */
if ((t1 >= 0) && (2*t1 == 4*t2-N+1)) {
for (t7=max(4*t3,2*t1+2);t7<=min(4*t3+3,2*t1+N-1);t7++) {
for (t8=max(2048*t4,2*t1+2);t8<=min(2048*t4+2047,2*t1+N-1);t8++) {
if ((2*t1+3*N+1)%4 == 0) {
A[0][(N-2)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(N-2)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(N-2)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(N-2)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(N-2)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(N-2)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(N-2)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(N-2)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(N-2)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(N-2)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(N-2)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(N-2)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(N-2)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
/* Boundary: i = 1 face — writes A[0] from A[1]; t1 == 2*t2 pins the tile
   to the lower i edge. */
if ((t1 == 2*t2) && (t1 >= max(ceild(4*t3-N+1,2),ceild(2048*t4-N+1,2)))) {
for (t7=max(4*t3,2*t1+2);t7<=min(4*t3+3,2*t1+N-1);t7++) {
for (t8=max(2048*t4,2*t1+2);t8<=min(2048*t4+2047,2*t1+N-1);t8++) {
if (t1%2 == 0) {
A[0][1][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][1][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][1 -1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][1 +1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][1][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][1 -1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][1 +1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][1 -1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][1 +1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][1 -1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][1 +1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][1][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][1][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][1 -1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][1 +1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][1][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][1][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
/* Boundary: i = N-2 face of the opposite half-step — writes A[1] from A[0]
   at the upper i edge. */
if ((t1 <= min(min(T-2,2*t3-1),1024*t4+1021)) && (2*t1 == 4*t2-N+1)) {
for (t7=max(4*t3,2*t1+3);t7<=min(2*t1+N,4*t3+3);t7++) {
for (t8=max(2048*t4,2*t1+3);t8<=min(2*t1+N,2048*t4+2047);t8++) {
if ((2*t1+3*N+1)%4 == 0) {
A[1][(N-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(N-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(N-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(N-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(N-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(N-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(N-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(N-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(N-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(N-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(N-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(N-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(N-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(N-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(N-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(N-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(N-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(N-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(N-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(N-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
/* Boundary: i = 1 face of the opposite half-step — writes A[1] from A[0]
   at the lower i edge. */
if ((t1 == 2*t2) && (t1 <= min(min(T-2,2*t3-2),1024*t4+1020))) {
for (t7=4*t3;t7<=min(2*t1+N,4*t3+3);t7++) {
for (t8=max(2048*t4,2*t1+3);t8<=min(2*t1+N,2048*t4+2047);t8++) {
if (t1%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
/* The four `if` blocks below handle the corner case t1 == 2*t3: they fuse
   two consecutive half-steps, interleaving the A[1] <- A[0] update of the
   current step with the one-behind A[0] <- A[1] update of the previous step
   (note the -1 index shift in the A[0]-writing statements). The variants
   differ only in how the iteration space intersects the 2048-wide t4 tile.
   Machine-generated by CLooG — do not hand-edit the bound expressions. */
/* Variant 1: region lies entirely inside one t4 tile; includes the j = 1
   and k = 1 low-boundary faces. */
if ((t1 == 2*t3) && (t1 <= min(floord(2048*t4-N+2048,2),2*t2-2)) && (t1 >= max(ceild(4*t2-N+2,2),1024*t4))) {
for (t8=2*t1+1;t8<=2*t1+N-2;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][1][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)+1]);;
}
}
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
}
for (t8=2*t1+2;t8<=2*t1+N-2;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2*t1+2;t8<=2*t1+N-1;t8++) {
if (t1%2 == 0) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
}
/* Variant 2: region starts at the lower edge of the t4 tile
   (t8 from 2048*t4) — the k = 1 face lies in a neighbouring tile. */
if ((t1 == 2*t3) && (t1 <= min(min(floord(2048*t4-N+2048,2),2*t2-2),1024*t4-2)) && (t1 >= max(ceild(4*t2-N+2,2),ceild(2048*t4-N+2,2)))) {
for (t8=2048*t4;t8<=2*t1+N-2;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][1][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)+1]);;
}
}
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N-2;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N-1;t8++) {
if (t1%2 == 0) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
}
/* Variant 3: region ends at the upper edge of the t4 tile
   (t8 up to 2048*t4+2047); includes the k = 1 face inside this tile. */
if ((t1 == 2*t3) && (t1 <= 2*t2-2) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(2048*t4-N+2049,2)),1024*t4))) {
for (t8=2*t1+1;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][1][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)+1]);;
}
}
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
}
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
}
/* Variant 4: the full 2048-wide t4 tile lies strictly inside the row —
   no k-boundary points, only the fused interior update pair. */
if ((t1 == 2*t3) && (t1 <= min(2*t2-2,1024*t4-2)) && (t1 >= max(ceild(4*t2-N+2,2),ceild(2048*t4-N+2049,2)))) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][1][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)+1]);;
}
}
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
}
/* NOTE(review): auto-generated (Pluto/CLooG-style) tiled code — do not hand-edit;
 * regenerate from the stencil specification instead.
 * Tile-boundary variant: both the t7 and t8 loop extents are clipped by the
 * problem size N (upper bounds 2*t1+N-2 / 2*t1+N-1) rather than by a tile edge.
 * Each statement is one 19-point stencil update: center*2.666, the six face
 * neighbours*0.166, and the twelve edge neighbours*0.0833, ping-ponging
 * between the A[0] and A[1] time planes.  The constants look like truncated
 * 8/3, 1/6 and 1/12 — TODO confirm against the generator's input stencil. */
if ((t1 <= min(min(floord(4*t3-N+4,2),floord(2048*t4-N+2048,2)),2*t2-1)) && (t1 >= max(ceild(4*t3-N+2,2),1024*t4))) {
for (t7=4*t3;t7<=2*t1+N-2;t7++) {
/* Peeled first iteration of the t8 loop: innermost index pinned to 1 (the
 * low boundary plane of the ping-pong target A[1]). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
for (t8=2*t1+2;t8<=2*t1+N-2;t8++) {
/* Steady state: the A[0]->A[1] update for (t7,t8) is fused with the
 * A[1]->A[0] update for the previous point (t7-1,t8-1) of the next
 * time step — classic time-skewed wavefront fusion. */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
/* Epilogue of the fused pair: trailing A[1]->A[0] update at the high z
 * boundary plane N-2. */
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
/* Epilogue row: trailing A[1]->A[0] updates at the high y boundary N-2. */
for (t8=2*t1+2;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)+1]);;
}
/* Remaining A[1]->A[0] updates for the last one/two x planes of the tile. */
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=2*t1+N-1;t7++) {
for (t8=2*t1+2;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
/* NOTE(review): auto-generated tile variant — same fused 19-point stencil as
 * the preceding guard block, but here t7 spans a full 4-wide tile
 * (4*t3 .. 4*t3+3) while t8 is still clipped by the problem size N.
 * Do not hand-edit; regenerate from the stencil specification. */
if ((t1 <= min(min(floord(2048*t4-N+2048,2),2*t2-1),2*t3-1)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+5,2)),1024*t4))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
/* Peeled first t8 iteration at the low boundary plane (index 1). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
for (t8=2*t1+2;t8<=2*t1+N-2;t8++) {
/* Fused pair: A[0]->A[1] at (t7,t8) plus A[1]->A[0] at (t7-1,t8-1). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
/* Trailing A[1]->A[0] update at the high z boundary plane N-2. */
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
/* Remaining A[1]->A[0] updates for the last one/two x planes of the tile. */
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+2;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
/* NOTE(review): auto-generated tile variant — t7 is clipped by N while t8
 * runs to the end of a full 2048-wide t4 tile (upper bound 2048*t4+2047).
 * Same fused 19-point ping-pong stencil as the sibling guard blocks.
 * Do not hand-edit; regenerate from the stencil specification. */
if ((t1 <= min(min(floord(4*t3-N+4,2),2*t2-1),1024*t4+1022)) && (t1 >= max(max(ceild(4*t3-N+2,2),ceild(2048*t4-N+2049,2)),1024*t4))) {
for (t7=4*t3;t7<=2*t1+N-2;t7++) {
/* Peeled first t8 iteration at the low boundary plane (index 1). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
/* Fused pair: A[0]->A[1] at (t7,t8) plus A[1]->A[0] at (t7-1,t8-1). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
/* Epilogue row: trailing A[1]->A[0] updates at the high y boundary N-2. */
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)+1]);;
}
/* Remaining A[1]->A[0] updates for the last one/two x planes of the tile. */
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=2*t1+N-1;t7++) {
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
/* NOTE(review): auto-generated tile variant — both t7 (4-wide) and t8
 * (2048-wide) span full tiles; only the first-tile peel at index 1 remains.
 * Same fused 19-point ping-pong stencil as the sibling guard blocks.
 * Do not hand-edit; regenerate from the stencil specification. */
if ((t1 <= min(min(2*t2-1,2*t3-1),1024*t4+1022)) && (t1 >= max(max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+5,2)),ceild(2048*t4-N+2049,2)),1024*t4))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
/* Peeled first t8 iteration at the low boundary plane (index 1). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
/* Fused pair: A[0]->A[1] at (t7,t8) plus A[1]->A[0] at (t7-1,t8-1). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
/* Remaining A[1]->A[0] updates for the last one/two x planes of the tile. */
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
/* NOTE(review): auto-generated tile variant — t8 starts at a non-first t4
 * tile origin (2048*t4, note t1 <= 1024*t4-1 in the guard) so there is no
 * index-1 peel; both t7 and t8 upper bounds are clipped by N.  Same fused
 * 19-point ping-pong stencil as the sibling guard blocks.  Do not hand-edit;
 * regenerate from the stencil specification. */
if ((t1 <= min(min(min(floord(4*t3-N+4,2),floord(2048*t4-N+2048,2)),2*t2-1),1024*t4-1)) && (t1 >= max(max(0,ceild(4*t3-N+2,2)),ceild(2048*t4-N+2,2)))) {
for (t7=4*t3;t7<=2*t1+N-2;t7++) {
for (t8=2048*t4;t8<=2*t1+N-2;t8++) {
/* Fused pair: A[0]->A[1] at (t7,t8) plus A[1]->A[0] at (t7-1,t8-1). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
/* Trailing A[1]->A[0] update at the high z boundary plane N-2. */
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
/* Epilogue row: trailing A[1]->A[0] updates at the high y boundary N-2. */
for (t8=2048*t4;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)+1]);;
}
/* Remaining A[1]->A[0] updates for the last one/two x planes of the tile. */
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=2*t1+N-1;t7++) {
for (t8=2048*t4;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
/* NOTE(review): auto-generated tile variant — t7 spans a full 4-wide tile
 * and t8 starts at a non-first t4 tile origin (2048*t4) with its upper bound
 * clipped by N, so there is no index-1 peel.  Same fused 19-point ping-pong
 * stencil as the sibling guard blocks.  Do not hand-edit; regenerate from
 * the stencil specification. */
if ((t1 <= min(min(min(floord(2048*t4-N+2048,2),2*t2-1),2*t3-1),1024*t4-1)) && (t1 >= max(max(max(0,ceild(4*t2-N+2,2)),ceild(4*t3-N+5,2)),ceild(2048*t4-N+2,2)))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N-2;t8++) {
/* Fused pair: A[0]->A[1] at (t7,t8) plus A[1]->A[0] at (t7-1,t8-1). */
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
/* Trailing A[1]->A[0] update at the high z boundary plane N-2. */
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
/* Remaining A[1]->A[0] updates for the last one/two x planes of the tile. */
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(floord(4*t3-N+4,2),2*t2-1),1024*t4-1)) && (t1 >= max(max(0,ceild(4*t3-N+2,2)),ceild(2048*t4-N+2049,2)))) {
for (t7=4*t3;t7<=2*t1+N-2;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)+1]);;
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=2*t1+N-1;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(2*t2-1,2*t3-1),1024*t4-1)) && (t1 >= max(max(max(0,ceild(4*t2-N+2,2)),ceild(4*t3-N+5,2)),ceild(2048*t4-N+2049,2)))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((N == 4) && (t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(T-2,1024*t4+1021))) {
for (t7=2*t1+3;t7<=2*t1+4;t7++) {
for (t8=2*t1+3;t8<=2*t1+4;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+4;t7++) {
for (t8=2*t1+3;t8<=2*t1+4;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2*t1+4;t8<=2*t1+5;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((N >= 5) && (t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(floord(2048*t4-N+2046,2),T-2)) && (t1 >= 1024*t4-1)) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][1][(-2*t1+t8-2)] = 2.666*A[0][3][1][(-2*t1+t8-2)] - (0.166*A[0][3 -1][1][(-2*t1+t8-2)] + 0.166*A[0][3 +1][1][(-2*t1+t8-2)] + 0.166*A[0][3][1 -1][(-2*t1+t8-2)] + 0.166*A[0][3][1 +1][(-2*t1+t8-2)] + 0.166*A[0][3][1][(-2*t1+t8-2)+1] + 0.166*A[0][3][1][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(min(floord(2048*t4-N+2046,2),T-2),1024*t4-3)) && (t1 >= ceild(2048*t4-N,2))) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][1][(-2*t1+t8-2)] = 2.666*A[0][3][1][(-2*t1+t8-2)] - (0.166*A[0][3 -1][1][(-2*t1+t8-2)] + 0.166*A[0][3 +1][1][(-2*t1+t8-2)] + 0.166*A[0][3][1 -1][(-2*t1+t8-2)] + 0.166*A[0][3][1 +1][(-2*t1+t8-2)] + 0.166*A[0][3][1][(-2*t1+t8-2)+1] + 0.166*A[0][3][1][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(T-2,1024*t4+1021)) && (t1 >= max(ceild(2048*t4-N+2047,2),1024*t4-1))) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][1][(-2*t1+t8-2)] = 2.666*A[0][3][1][(-2*t1+t8-2)] - (0.166*A[0][3 -1][1][(-2*t1+t8-2)] + 0.166*A[0][3 +1][1][(-2*t1+t8-2)] + 0.166*A[0][3][1 -1][(-2*t1+t8-2)] + 0.166*A[0][3][1 +1][(-2*t1+t8-2)] + 0.166*A[0][3][1][(-2*t1+t8-2)+1] + 0.166*A[0][3][1][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(T-2,1024*t4-3)) && (t1 >= ceild(2048*t4-N+2047,2))) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][1][(-2*t1+t8-2)] = 2.666*A[0][3][1][(-2*t1+t8-2)] - (0.166*A[0][3 -1][1][(-2*t1+t8-2)] + 0.166*A[0][3 +1][1][(-2*t1+t8-2)] + 0.166*A[0][3][1 -1][(-2*t1+t8-2)] + 0.166*A[0][3][1 +1][(-2*t1+t8-2)] + 0.166*A[0][3][1][(-2*t1+t8-2)+1] + 0.166*A[0][3][1][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
// NOTE(review): auto-generated polyhedral tiled code (Pluto/CLooG style: floord/ceild
// helpers, tile iterators t1..t8). Comments only — generated code kept byte-identical.
// Guard-specialized variant of the stencil body: the conjunction selects one
// tile-boundary case (t1 on the t2 tile edge; bounds tie t1 to the t3/t4 tiles; N >= 5).
if ((N >= 5) && (t1 == 2*t2-1) && (t1 <= min(min(floord(4*t3-N+2,2),floord(2048*t4-N+2046,2)),T-2)) && (t1 >= max(ceild(4*t3-N,2),1024*t4-1))) {
// Sweep second-index plane 1 of A[1] from A[0]: 19-point stencil
// (2.666*center - 0.166*(6 face neighbours) - 0.0833*(12 edge neighbours)).
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
// parity guard on the time tile, emitted by the code generator
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Same stencil for plane 2 of A[1], over the same index region.
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 3 of A[1]: the first column (literal index 1) is peeled out of the t8 loop;
// the interior iterations are fused with the next-step write-back of plane 2 into
// A[0] (same stencil, source/destination buffers swapped, indices shifted by -3).
for (t7=4*t3;t7<=2*t1+N;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
// fused update: A[0] plane 2 recomputed from A[1] (time-skewed by the -3 shift)
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
// epilogue: last column (N-2) of the fused A[0] plane-2 update
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
// epilogue: last row (N-2) of the fused A[0] plane-2 update
for (t8=2*t1+4;t8<=2*t1+N+1;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][2][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
}
// Guard-specialized variant: same 19-point stencil body as the region above, but the
// t8 (column) loop starts at the t4 tile origin (2048*t4) instead of the skewed lower
// bound, so no first-column peel is needed. Generated code kept byte-identical.
if ((t1 == 2*t2-1) && (t1 <= min(min(min(floord(4*t3-N+2,2),floord(2048*t4-N+2046,2)),T-2),1024*t4-3)) && (t1 >= ceild(4*t3-N,2))) {
// Plane 1 of A[1] from A[0].
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 2 of A[1] from A[0].
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 3 of A[1], fused with the next-step write-back of plane 2 into A[0]
// (indices shifted by -3), followed by column/row boundary epilogues.
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
// epilogue: last column (N-2) of the fused A[0] plane-2 update
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
// epilogue: last row (N-2) of the fused A[0] plane-2 update
for (t8=2048*t4;t8<=2*t1+N+1;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][2][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
}
// Guard-specialized variant: same stencil body, but the t7 (row) loop covers only the
// four-row tail of the t3 tile (4*t3..4*t3+3); t8 uses the skewed lower bound with the
// first-column peel. No row epilogue here. Generated code kept byte-identical.
if ((t1 == 2*t2-1) && (t1 <= min(min(floord(2048*t4-N+2046,2),T-2),2*t3-3)) && (t1 >= max(ceild(4*t3-N+3,2),1024*t4-1))) {
// Plane 1 of A[1] from A[0].
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 2 of A[1] from A[0].
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 3 of A[1]: first column (index 1) peeled, interior fused with the next-step
// write-back of plane 2 into A[0] (indices shifted by -3), then a column epilogue.
for (t7=4*t3;t7<=4*t3+3;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
// epilogue: last column (N-2) of the fused A[0] plane-2 update
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
// Guard-specialized variant: four-row t3 tail (t7 in 4*t3..4*t3+3) combined with the
// t4 tile-origin start for t8 (2048*t4), so there is neither a column peel nor a row
// epilogue. Generated code kept byte-identical.
if ((t1 == 2*t2-1) && (t1 <= min(min(min(floord(2048*t4-N+2046,2),T-2),2*t3-3),1024*t4-3)) && (t1 >= max(ceild(2048*t4-N,2),ceild(4*t3-N+3,2)))) {
// Plane 1 of A[1] from A[0].
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 2 of A[1] from A[0].
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 3 of A[1], fused with the next-step write-back of plane 2 into A[0]
// (indices shifted by -3), followed by the last-column (N-2) epilogue.
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
// epilogue: last column (N-2) of the fused A[0] plane-2 update
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
// Guard-specialized variant: t8 (column) loop bounded above by the t4 tile end
// (2048*t4+2047); includes the first-column peel for plane 3 and the last-row (N-2)
// epilogue, but no last-column epilogue. Generated code kept byte-identical.
if ((t1 == 2*t2-1) && (t1 <= min(min(floord(4*t3-N+2,2),T-2),1024*t4+1021)) && (t1 >= max(max(ceild(4*t3-N,2),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
// Plane 1 of A[1] from A[0].
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 2 of A[1] from A[0].
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
// Plane 3 of A[1]: first column (index 1) peeled, interior fused with the next-step
// write-back of plane 2 into A[0] (indices shifted by -3).
for (t7=4*t3;t7<=2*t1+N;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
// epilogue: last row (N-2) of the fused A[0] plane-2 update
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][2][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(floord(4*t3-N+2,2),T-2),1024*t4-3)) && (t1 >= max(ceild(4*t3-N,2),ceild(2048*t4-N+2047,2)))) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][2][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(T-2,2*t3-3),1024*t4+1021)) && (t1 >= max(max(ceild(4*t3-N+3,2),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(T-2,2*t3-3),1024*t4-3)) && (t1 >= max(ceild(4*t3-N+3,2),ceild(2048*t4-N+2047,2)))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((N >= 6) && (t1 <= min(min(T-2,2*t3-1),1024*t4+1021)) && (2*t1 == 4*t2-N+2)) {
for (t6=2*t1+N-1;t6<=2*t1+N;t6++) {
for (t7=max(4*t3,2*t1+3);t7<=min(2*t1+N,4*t3+3);t7++) {
for (t8=max(2048*t4,2*t1+3);t8<=min(2*t1+N,2048*t4+2047);t8++) {
if ((2*t1+3*N+2)%4 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
for (t7=max(4*t3,2*t1+4);t7<=4*t3+3;t7++) {
for (t8=max(2048*t4,2*t1+4);t8<=min(2048*t4+2047,2*t1+N+1);t8++) {
if ((2*t1+3*N+2)%4 == 0) {
A[0][(N-2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(N-2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(N-2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(N-2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(N-2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(N-2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(N-2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(N-2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(N-2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(N-2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(N-2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(N-2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(N-2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((t1 == 2*t3-1) && (t1 <= min(min(floord(2048*t4-N+2046,2),T-2),2*t2-3)) && (t1 >= max(ceild(4*t2-N+3,2),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t3-1) && (t1 <= min(min(min(floord(2048*t4-N+2046,2),T-2),2*t2-3),1024*t4-3)) && (t1 >= max(ceild(2048*t4-N,2),ceild(4*t2-N+3,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t3-1) && (t1 <= min(min(T-2,2*t2-3),1024*t4+1021)) && (t1 >= max(max(ceild(4*t2-N+3,2),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
/* Boundary tile: executes only when the time-tile index sits exactly on the t3
 * (second spatial dimension) tile edge (t1 == 2*t3-1), clipped by T, t2 and t4
 * bounds.  The body applies a 27-point 3D stencil: centre weight 2.666, the six
 * face neighbours weight 0.166, the twelve edge neighbours weight 0.0833.
 * A[1] is computed from A[0] (forward half-step); the final statement writes
 * A[0] back from A[1] (the second, double-buffered time step of the tile).
 * The (t1+1)%2 == 0 guards restrict the body to odd t1 values — presumably a
 * time-parity condition emitted by the tiler; TODO confirm against generator.
 * NOTE(review): this looks like auto-generated polyhedral (Pluto/CLooG-style)
 * output — do not hand-edit the index arithmetic. */
if ((t1 == 2*t3-1) && (t1 <= min(min(T-2,2*t2-3),1024*t4-3)) && (t1 >= max(ceild(4*t2-N+3,2),ceild(2048*t4-N+2047,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
/* forward step: A[1] <- stencil(A[0]) over the interior of this tile */
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
/* t7 = lower face of the tile: second spatial index pinned to 1 */
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
/* back step: A[0] <- stencil(A[1]) at the trailing (offset -3) points */
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
/* Boundary tile at the upper spatial boundary: guards involve 4*t3-N and
 * 2048*t4-N, i.e. the t3 and t4 spatial tiles butt against the domain size N.
 * 27-point 3D stencil (centre 2.666, six faces 0.166, twelve edges 0.0833),
 * double-buffered: A[1] <- stencil(A[0]) forward, A[0] <- stencil(A[1]) back.
 * The trailing loops (t7-3 / N-2 indices) perform the back step at points the
 * fused nest above could not cover — the peeled boundary of the tile.
 * No parity guard here, unlike the sibling tiles — presumably the guard was
 * statically resolved for this iteration domain; TODO confirm with generator.
 * NOTE(review): auto-generated polyhedral output — do not hand-edit indices. */
if ((t1 <= min(min(min(floord(4*t3-N+2,2),floord(2048*t4-N+2046,2)),T-2),2*t2-2)) && (t1 >= max(max(ceild(4*t3-N,2),ceild(4*t2-N+3,2)),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
for (t8=2*t1+4;t8<=2*t1+N+1;t8++) {
A[0][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
/* Variant of the upper-boundary (N-edge) tile for the case where the t8 range
 * starts at the beginning of a t4 tile (t8 from 2048*t4; guard adds 1024*t4-2).
 * Same 27-point 3D stencil as the surrounding cases: centre 2.666, six face
 * neighbours 0.166, twelve edge neighbours 0.0833.  The first nest computes
 * the forward step A[1] <- stencil(A[0]); the interleaved and trailing
 * statements compute the back step A[0] <- stencil(A[1]) at offset -3 points
 * and along the N-2 faces (peeled tile boundary).
 * NOTE(review): auto-generated polyhedral output — do not hand-edit indices. */
if ((t1 <= min(min(min(min(floord(4*t3-N+2,2),floord(2048*t4-N+2046,2)),T-2),2*t2-2),1024*t4-2)) && (t1 >= max(ceild(4*t3-N,2),ceild(4*t2-N+3,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
for (t8=2048*t4;t8<=2*t1+N+1;t8++) {
A[0][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
/* Boundary tile at the start of a t3 spatial tile: t7 sweeps only the first
 * four planes of the tile (4*t3 .. 4*t3+3), clipped by N through the guard.
 * 27-point 3D stencil (centre 2.666, six faces 0.166, twelve edges 0.0833),
 * double-buffered: the first nest is the forward step A[1] <- stencil(A[0])
 * (including the t8 face pinned to index 1); the interleaved and trailing
 * statements are the back step A[0] <- stencil(A[1]) at offset -3 / N-2 points.
 * NOTE(review): auto-generated polyhedral output — do not hand-edit indices. */
if ((t1 <= min(min(min(floord(2048*t4-N+2046,2),T-2),2*t2-2),2*t3-2)) && (t1 >= max(max(ceild(4*t2-N+3,2),ceild(4*t3-N+3,2)),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
/* Boundary tile at the start of both a t3 tile (t7 from 4*t3, four planes) and
 * a t4 tile (t8 from 2048*t4), clipped against N by the guard.
 * 27-point 3D stencil (centre 2.666, six faces 0.166, twelve edges 0.0833),
 * double-buffered: the first nest is the forward step A[1] <- stencil(A[0]);
 * the interleaved statement and the trailing N-2 statement are the back step
 * A[0] <- stencil(A[1]) at the offset-(-3) / boundary points of the tile.
 * NOTE(review): auto-generated polyhedral output — do not hand-edit indices. */
if ((t1 <= min(min(min(min(floord(2048*t4-N+2046,2),T-2),2*t2-2),2*t3-2),1024*t4-2)) && (t1 >= max(max(ceild(2048*t4-N,2),ceild(4*t2-N+3,2)),ceild(4*t3-N+3,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
if ((t1 <= min(min(min(floord(4*t3-N+2,2),T-2),2*t2-2),1024*t4+1021)) && (t1 >= max(max(max(ceild(4*t3-N,2),ceild(4*t2-N+3,2)),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1 <= min(min(min(floord(4*t3-N+2,2),T-2),2*t2-2),1024*t4-2)) && (t1 >= max(max(ceild(4*t3-N,2),ceild(4*t2-N+3,2)),ceild(2048*t4-N+2047,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1 <= min(min(min(T-2,2*t2-2),2*t3-2),1024*t4+1021)) && (t1 >= max(max(max(ceild(4*t2-N+3,2),ceild(4*t3-N+3,2)),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 <= min(min(min(T-2,2*t2-2),2*t3-2),1024*t4-2)) && (t1 >= max(max(ceild(4*t2-N+3,2),ceild(4*t3-N+3,2)),ceild(2048*t4-N+2047,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 >= 2*t3) && (t3 <= min(floord(T-2,2),512*t4+510))) {
for (t6=max(max(4*t2,4*t3+3),-4*t1+4*t2+8*t3+1);t6<=min(min(4*t2+3,4*t3+N),-4*t1+4*t2+8*t3+4);t6++) {
for (t8=max(2048*t4,4*t3+3);t8<=min(4*t3+N,2048*t4+2047);t8++) {
A[1][(-4*t3+t6-2)][1][(-4*t3+t8-2)] = 2.666*A[0][(-4*t3+t6-2)][1][(-4*t3+t8-2)] - (0.166*A[0][(-4*t3+t6-2)-1][1][(-4*t3+t8-2)] + 0.166*A[0][(-4*t3+t6-2)+1][1][(-4*t3+t8-2)] + 0.166*A[0][(-4*t3+t6-2)][1 -1][(-4*t3+t8-2)] + 0.166*A[0][(-4*t3+t6-2)][1 +1][(-4*t3+t8-2)] + 0.166*A[0][(-4*t3+t6-2)][1][(-4*t3+t8-2)+1] + 0.166*A[0][(-4*t3+t6-2)][1][(-4*t3+t8-2)-1])- (0.0833*A[0][(-4*t3+t6-2)-1][1 -1][(-4*t3+t8-2)] + 0.0833*A[0][(-4*t3+t6-2)+1][1 -1][(-4*t3+t8-2)] + 0.0833*A[0][(-4*t3+t6-2)-1][1 +1][(-4*t3+t8-2)] + 0.0833*A[0][(-4*t3+t6-2)+1][1 +1][(-4*t3+t8-2)] + 0.0833*A[0][(-4*t3+t6-2)-1][1][(-4*t3+t8-2)-1] + 0.0833*A[0][(-4*t3+t6-2)+1][1][(-4*t3+t8-2)-1] + 0.0833*A[0][(-4*t3+t6-2)][1 -1][(-4*t3+t8-2)-1] + 0.0833*A[0][(-4*t3+t6-2)][1 +1][(-4*t3+t8-2)-1] + 0.0833*A[0][(-4*t3+t6-2)-1][1][(-4*t3+t8-2)+1] + 0.0833*A[0][(-4*t3+t6-2)+1][1][(-4*t3+t8-2)+1] + 0.0833*A[0][(-4*t3+t6-2)][1 -1][(-4*t3+t8-2)+1] + 0.0833*A[0][(-4*t3+t6-2)][1 +1][(-4*t3+t8-2)+1]);;
}
}
}
if ((t1 >= 1024*t4+1022) && (t4 <= floord(T-1024,1024))) {
for (t6=max(max(4*t2,2048*t4+2047),-4*t1+4*t2+4096*t4+4089);t6<=min(min(4*t2+3,2048*t4+N+2044),-4*t1+4*t2+4096*t4+4092);t6++) {
for (t7=max(4*t3,2048*t4+2047);t7<=min(4*t3+3,2048*t4+N+2044);t7++) {
A[1][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)][1] = 2.666*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)][1] - (0.166*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)][1] + 0.166*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)][1] + 0.166*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)-1][1] + 0.166*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)+1][1] + 0.166*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)][1 +1] + 0.166*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)][1 -1])- (0.0833*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)-1][1] + 0.0833*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)-1][1] + 0.0833*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)+1][1] + 0.0833*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)+1][1] + 0.0833*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)][1 -1] + 0.0833*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)][1 -1] + 0.0833*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)-1][1 -1] + 0.0833*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)+1][1 -1] + 0.0833*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)][1 +1] + 0.0833*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)][1 +1] + 0.0833*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)-1][1 +1] + 0.0833*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)+1][1 +1]);;
}
}
}
}
}
}
}
}
/* End of CLooG code */
// #undef N
// #define N 300L
#undef T
#define T 600L
#ifdef TIME
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);
printf("|Time taken: %7.5lfs\t", (tdiff * 0.001) * 1.0e3);
printf("|MFLOPS: %f\n", ((((double)NUM_FP_OPS * N *N * N * (T-1)) / tdiff) / 1000000L));
#endif
#ifdef VERIFY
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
total+= A[T%2][i][j][k] ;
}
}
}
printf("|sum: %e\t", total);
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
sum_err_sqr += (A[T%2][i][j][k] - (total/N))*(A[T%2][i][j][k] - (total/N));
}
}
}
printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr));
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
chtotal += ((char *)A[T%2][i][j])[k];
}
}
}
printf("|sum(rep(A)) = %d\n", chtotal);
#endif
for (l = 0; l < 2; l++){
for (i = 0; i < N; i++){
for (j = 0; j < N; j++)
free(A[l][i][j]); // = (double *) malloc(N * sizeof (double));
free(A[l][i]); // = (double **) malloc(N * sizeof(double *));
}
free(A[l]); // = (double ***) malloc(N * sizeof(double **));
}
return 0;
}
// icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm
// /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/
// /* @ begin PrimeRegTile (scalar_replacement=0; T1t5=4; T1t6=4; T1t7=4; T1t8=4; ) @*/
// /* @ end @*/
|
GB_unop__identity_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_int64)
// op(A') function: GB (_unop_tran__identity_fp32_int64)
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary op, casting int64_t -> float.
// Cx and Ax may be aliased; when Ab (the bitmap) is present, only entries
// whose bitmap byte is nonzero are written.
GrB_Info GB (_unop_apply__identity_fp32_int64)
(
    float *Cx,                  // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            // cast and copy in one step: Cx [p] = (float) Ax [p]
            Cx [p] = (float) Ax [p] ;
        }
    }
    else
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (float) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast each int64_t entry to float, and
// apply the identity op.  The actual loop nest lives in the shared template
// GB_unop_transpose.c, which is specialized here via the GB_* macros defined
// above (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).
GrB_Info GB (_unop_tran__identity_fp32_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,     // per-workspace scratch for the transpose
const int64_t *restrict A_slice,   // how A's entries are split across threads
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
search.h | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/search.h
* @brief Parallel implementation base for std::search() and
* std::search_n().
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_SEARCH_H
#define _GLIBCXX_PARALLEL_SEARCH_H 1
#include <bits/stl_algobase.h>
#include <parallel/parallel.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
* @brief Precalculate __advances for Knuth-Morris-Pratt algorithm.
* @param __elements Begin iterator of sequence to search for.
* @param __length Length of sequence to search for.
* @param __off Returned __offsets.
*/
template<typename _RAIter, typename _DifferenceTp>
void
__calc_borders(_RAIter __elements, _DifferenceTp __length,
               _DifferenceTp* __off)
{
  typedef _DifferenceTp _DifferenceType;

  // Knuth-Morris-Pratt border table: __off[j] is the length of the
  // longest proper border of the prefix of length j.  The empty prefix
  // gets -1 by convention.
  __off[0] = -1;
  if (__length > 1)
    __off[1] = 0;

  _DifferenceType __border = 0;
  for (_DifferenceType __i = 2; __i <= __length; ++__i)
    {
      // Shrink the current border until it can be extended by the
      // element just before position __i (or until it is empty).
      while (__border >= 0
             && !(__elements[__border] == __elements[__i - 1]))
        __border = __off[__border];
      __off[__i] = ++__border;
    }
}
// Generic parallel find algorithm (requires random access iterator).
/** @brief Parallel std::search.
* @param __begin1 Begin iterator of first sequence.
* @param __end1 End iterator of first sequence.
* @param __begin2 Begin iterator of second sequence.
* @param __end2 End iterator of second sequence.
* @param __pred Find predicate.
* @return Place of finding in first sequences. */
template<typename __RAIter1,
typename __RAIter2,
typename _Pred>
__RAIter1
__search_template(__RAIter1 __begin1, __RAIter1 __end1,
__RAIter2 __begin2, __RAIter2 __end2,
_Pred __pred)
{
typedef std::iterator_traits<__RAIter1> _TraitsType;
typedef typename _TraitsType::difference_type _DifferenceType;
_GLIBCXX_CALL((__end1 - __begin1) + (__end2 - __begin2));
_DifferenceType __pattern_length = __end2 - __begin2;
// Pattern too short.
if(__pattern_length <= 0)
return __end1;
// Last point to start search.
_DifferenceType __input_length = (__end1 - __begin1) - __pattern_length;
// Where is first occurrence of pattern? defaults to end.
_DifferenceType __result = (__end1 - __begin1);
_DifferenceType *__splitters;
// Pattern too long.
if (__input_length < 0)
return __end1;
// Lock serializing updates of the shared minimum __result.
omp_lock_t __result_lock;
omp_init_lock(&__result_lock);
_ThreadIndex __num_threads = std::max<_DifferenceType>
(1, std::min<_DifferenceType>(__input_length,
__get_max_threads()));
// KMP failure table for the pattern.  NOTE(review): this is a VLA
// (GNU extension in C++) sized by the pattern length — a very long
// pattern could overflow the stack; verify callers bound it.
_DifferenceType __advances[__pattern_length];
__calc_borders(__begin2, __pattern_length, __advances);
# pragma omp parallel num_threads(__num_threads)
{
# pragma omp single
{
// The runtime may grant fewer threads than requested; split the
// candidate start positions evenly among the actual thread count.
__num_threads = omp_get_num_threads();
__splitters = new _DifferenceType[__num_threads + 1];
__equally_split(__input_length, __num_threads, __splitters);
}
_ThreadIndex __iam = omp_get_thread_num();
// This thread scans candidate start positions [__start, __stop].
_DifferenceType __start = __splitters[__iam],
__stop = __splitters[__iam + 1];
_DifferenceType __pos_in_pattern = 0;
bool __found_pattern = false;
while (__start <= __stop && !__found_pattern)
{
// Get new value of result.
#pragma omp flush(__result)
// No chance for this thread to find first occurrence.
if (__result < __start)
break;
// Extend the current partial match as far as the predicate allows.
while (__pred(__begin1[__start + __pos_in_pattern],
__begin2[__pos_in_pattern]))
{
++__pos_in_pattern;
if (__pos_in_pattern == __pattern_length)
{
// Found new candidate for result.
omp_set_lock(&__result_lock);
__result = std::min(__result, __start);
omp_unset_lock(&__result_lock);
__found_pattern = true;
break;
}
}
// Make safe jump.
__start += (__pos_in_pattern - __advances[__pos_in_pattern]);
__pos_in_pattern = (__advances[__pos_in_pattern] < 0
? 0 : __advances[__pos_in_pattern]);
}
} //parallel
omp_destroy_lock(&__result_lock);
delete[] __splitters;
// Return iterator on found element.
return (__begin1 + __result);
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_SEARCH_H */
|
fatorial.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Computes n! as a long, multiplying 2..n in parallel; the OpenMP
 * reduction combines each thread's private partial product.
 * For n < 2 the loop body never runs and the result is 1. */
long fat(int n) {
    long acc = 1;
    int k;

    #pragma omp parallel for reduction(*:acc)
    for (k = 2; k <= n; k++) {
        acc *= k;
    }

    return acc;
}
/* Entry point: reads a natural number from argv[1], validates it, and
 * prints its factorial along with the OpenMP thread count in use. */
int main(int argc, char **argv) {
    if (argc < 2) {
        printf("uso ./fatorial <numero natural>\n");
        exit(1);
    }

    int n = atoi(argv[1]);
    if (n < 0) {
        printf("Erro! Numero de entrada não é natural\n");
        exit(1);
    }

    // omp_set_num_threads(1);
    printf("Calculando fatorial de %d com %d threads.\n", n, omp_get_max_threads());

    long resultado = fat(n);
    printf("fatorial(%d) = %ld\n", n, resultado);
    return 0;
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
 public:
  /*! \brief number of data fields in MetaInfo
   *  (presumably the field count of the binary serialization format --
   *  confirm against the LoadBinary/SaveBinary implementations). */
  static constexpr uint64_t kNumField = 7;
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;
  /*!
   * \brief the index of begin and end of a group
   *  needed when the learning task is ranking.
   */
  std::vector<bst_group_t> group_ptr_;
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin
   * can be used to specify initial prediction to boost from.
   */
  HostDeviceVector<bst_float> base_margin_;
  /*! \brief default constructor */
  MetaInfo() = default;
  /*!
   * \brief Deep-copy assignment: scalar counters are copied directly and each
   *        HostDeviceVector is resized then copied element-wise.
   *        NOTE(review): label_order_cache_ is not copied here; LabelAbsSort()
   *        only rebuilds it when its size differs from labels_, so assigning
   *        equally-sized labels could leave a stale cache -- confirm callers
   *        never rely on that combination.
   */
  MetaInfo& operator=(MetaInfo const& that) {
    this->num_row_ = that.num_row_;
    this->num_col_ = that.num_col_;
    this->num_nonzero_ = that.num_nonzero_;
    this->labels_.Resize(that.labels_.Size());
    this->labels_.Copy(that.labels_);
    this->group_ptr_ = that.group_ptr_;
    this->weights_.Resize(that.weights_.Size());
    this->weights_.Copy(that.weights_);
    this->base_margin_.Resize(that.base_margin_.Size());
    this->base_margin_.Copy(that.base_margin_);
    return *this;
  }
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight, or 1.0f when no weights were provided.
   */
  inline bst_float GetWeight(size_t i) const {
    return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
  }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss)
   *  Lazily computed and memoized in label_order_cache_ (mutable so this
   *  accessor stays const).  NOTE(review): the lazy fill is not synchronized;
   *  presumably only called from a single thread -- confirm. */
  inline const std::vector<size_t>& LabelAbsSort() const {
    if (label_order_cache_.size() == labels_.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
                          [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
  /*!
   * \brief Set information in the meta info with array interface.
   * \param key The key of the information.
   * \param interface_str String representation of json format array interface.
   *
   *   [ column_0, column_1, ... column_n ]
   *
   * Right now only 1 column is permitted.
   */
  void SetInfo(const char* key, std::string const& interface_str);

 private:
  /*! \brief argsort of labels; lazily filled by LabelAbsSort(). */
  mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_feature_t index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief comparator ordering entries by ascending feature value
   *  (usable as the predicate for std::sort; see SparsePage::SortRows). */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  /*! \brief equality: both index and value must match exactly */
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};
/*!
* \brief Parameters for constructing batches.
*/
struct BatchParam {
  /*! \brief The GPU device to use.  Initialized to -1 ("no device") so a
   *  default-constructed object holds a determinate value; previously this
   *  member was left uninitialized, and operator!= then read indeterminate
   *  memory on default-constructed instances. */
  int gpu_id{-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Page size for external memory mode.  Zero-initialized for the
   *  same reason as gpu_id. */
  size_t gpu_page_size{0};
  BatchParam() = default;
  /*!
   * \brief Construct from explicit values.
   * \param device GPU ordinal.
   * \param max_bin Maximum histogram bins per feature.
   * \param gpu_page_size External-memory page size (0 = in-memory).
   */
  BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
      : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
  /*! \brief Inequality: any differing field makes the parameters differ. */
  inline bool operator!=(const BatchParam& other) const {
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
           gpu_page_size != other.gpu_page_size;
  }
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
 public:
  // Offset for each row (CSR row pointers; offset[i]..offset[i+1] delimits row i).
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  /*! \brief row id of the first instance stored in this page (see SetBaseRowId) */
  size_t base_rowid{};
  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;
  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i],
            static_cast<Inst::index_type>(size)};
  }
  /*! \brief constructor: starts as an empty page (single sentinel offset) */
  SparsePage() {
    this->Clear();
  }
  /*! \return Number of instances in the page. */
  inline size_t Size() const {
    return offset.Size() == 0 ? 0 : offset.Size() - 1;
  }
  /*! \return estimation of memory cost of this page
   *  NOTE(review): offsets are stored as bst_row_t but costed here with
   *  sizeof(size_t); this is only an estimate -- confirm if exact accounting
   *  is ever required. */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }
  /*! \brief clear the page; keeps the single 0 sentinel so Size() == 0 */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    offset_vec.push_back(0);
    data.HostVector().clear();
  }
  /*! \brief Set the base row id for this page. */
  inline void SetBaseRowId(size_t row_id) {
    base_rowid = row_id;
  }
  SparsePage GetTranspose(int num_columns) const;
  /*! \brief Sort the entries of every segment in place by feature value.
   *  NOTE(review): the pragma uses default(none) while the loop body uses
   *  `this` implicitly; some compilers reject that combination -- confirm
   *  behavior on all supported toolchains. */
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(
            this->data.HostVector().begin() + this->offset.HostVector()[i],
            this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
            Entry::CmpValue);
      }
    }
  }
  /*!
   * \brief Push row block into the page.
   * \param batch the row batch.
   */
  void Push(const dmlc::RowBlock<uint32_t>& batch);
  /**
   * \brief Pushes external data batch onto this page
   *
   * \tparam AdapterBatchT
   * \param batch
   * \param missing
   * \param nthread
   *
   * \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
   */
  template <typename AdapterBatchT>
  uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
};
/*! \brief Column-wise view of the data (see DMatrix::GetColumnBatches);
 *  same storage layout as SparsePage, distinct type for dispatch. */
class CSCPage: public SparsePage {
 public:
  CSCPage() : SparsePage() {}
  /*! \brief Wrap an existing page (takes ownership by move). */
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*! \brief Column-wise view whose segments are sorted by feature value
 *  (see DMatrix::GetSortedColumnBatches). */
class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() : SparsePage() {}
  /*! \brief Wrap an existing page (takes ownership by move). */
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
 public:
  /*!
   * \brief Default constructor.
   *
   * This is used in the external memory case. An empty ELLPACK page is constructed with its content
   * set later by the reader.
   */
  EllpackPage();
  /*!
   * \brief Constructor from an existing DMatrix.
   *
   * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
   * in CSR format.
   */
  explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
  /*! \brief Destructor. Out-of-line because impl_ is an incomplete type here. */
  ~EllpackPage();
  /*! \return Number of instances in the page. */
  size_t Size() const;
  /*! \brief Set the base row id for this page. */
  void SetBaseRowId(size_t row_id);
  /*! \brief Access the CUDA-side implementation (PImpl). */
  const EllpackPageImpl* Impl() const { return impl_.get(); }
  EllpackPageImpl* Impl() { return impl_.get(); }

 private:
  std::unique_ptr<EllpackPageImpl> impl_;
};
/*! \brief Interface implemented by concrete batch iterators; type-erased
 *  behind BatchIterator. */
template<typename T>
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() = default;
  virtual T& operator*() = 0;
  virtual const T& operator*() const = 0;
  virtual void operator++() = 0;
  /*! \return true once the underlying batch source is exhausted */
  virtual bool AtEnd() const = 0;
};
/*! \brief Forward iterator over batches; shared-ownership wrapper around a
 *  BatchIteratorImpl. */
template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  /*! \brief Takes ownership of the raw impl pointer (stored in a shared_ptr).
   *  Pass nullptr to construct the end() sentinel. */
  explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
  void operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
  }
  T& operator*() {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  /*! \brief NOTE(review): rhs is ignored -- this only supports the
   *  `begin != end` test of a range-based for loop (it reports !AtEnd()),
   *  and it CHECK-fails if invoked on the nullptr end() sentinel itself. */
  bool operator!=(const BatchIterator& rhs) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }
  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

 private:
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
/*! \brief Range (begin/end pair) over batches of type T, usable with
 *  range-based for.  end() is a nullptr sentinel; the comparison logic
 *  lives in BatchIterator::operator!=. */
template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
  BatchIterator<T> begin() { return begin_iter_; }
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }

 private:
  BatchIterator<T> begin_iter_;
};
/*!
* \brief This is data structure that user can pass to DMatrix::Create
* to create a DMatrix for training, user can create this data structure
* for customized Data Loading on single machine.
*
* On distributed setting, usually an customized dmlc::Parser is needed instead.
*/
/*! \brief User-provided data source: a dmlc::DataIter over batches of T,
 *  carrying the dataset's MetaInfo alongside. */
template<typename T>
class DataSource : public dmlc::DataIter<T> {
 public:
  /*!
   * \brief Meta information about the dataset
   * The subclass need to be able to load this correctly from data.
   */
  MetaInfo info;
};
/*!
* \brief Internal data structured used by XGBoost during training.
* There are two ways to create a customized DMatrix that reads in user defined-format.
*
* - Provide a dmlc::Parser and pass into the DMatrix::Create
* - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by
* DMLC_REGISTER_DATA_PARSER;
* - This works best for user defined data input source, such as data-base, filesystem.
* - Provide a DataSource, that can be passed to DMatrix::Create
* This can be used to re-use inmemory data structure into DMatrix.
*/
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   *        Only the specializations below (SparsePage, CSCPage, SortedCSCPage,
   *        EllpackPage) are defined.
   */
  template<typename T>
  BatchSet<T> GetBatches(const BatchParam& param = {});
  /*! \brief Whether the page of type T has already been materialized. */
  template <typename T>
  bool PageExists() const;
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix() = default;
  /*! \brief Whether the matrix is dense (every cell holds a value). */
  bool IsDense() const {
    return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
  }
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *          By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto",
                       size_t page_size = kPageSize);
  /**
   * \brief Creates a new DMatrix from an external data adapter.
   *
   * \tparam AdapterT Type of the adapter.
   * \param [in,out] adapter View onto an external data.
   * \param missing Values to count as missing.
   * \param nthread Number of threads for construction.
   * \param cache_prefix (Optional) The cache prefix for external memory.
   * \param page_size (Optional) Size of the page.
   *
   * \return a Created DMatrix.
   */
  template <typename AdapterT>
  static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
                         const std::string& cache_prefix = "",
                         size_t page_size = kPageSize);
  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;

 protected:
  // Accessors backing the GetBatches<T>() specializations below.
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
  virtual bool EllpackExists() const = 0;
  virtual bool SparsePageExists() const = 0;
};
// Explicit specializations mapping each page type requested through
// DMatrix::GetBatches<T>() / PageExists<T>() onto the corresponding
// protected virtual accessor.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
  return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
  return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetSortedColumnBatches();
}
// Only the ELLPACK accessor actually consumes the BatchParam.
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif // XGBOOST_DATA_H_
|
stats_tools.c | /*Daala video codec
Copyright (c) 2013 Daala project contributors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "stats_tools.h"
#include "od_defs.h"
#include "od_filter.h"
#include "od_intra.h"
#include "../src/dct.h"
#include "../src/intra.h"
#define PRINT_SCALE (0)
/*Initialize the per-mode statistics: zero the counters and SATD averages and
  allocate _b_sz*_b_sz covariance accumulators for the reference and residual.*/
void mode_data_init(mode_data *_md,int _b_sz){
  int i;
  _md->n=0;
  _md->mean=0;
  _md->var=0;
  for(i=0;i<B_SZ_MAX*B_SZ_MAX;i++){
    _md->satd_avg[i]=0;
  }
  od_covmat_init(&_md->ref,_b_sz*_b_sz);
  od_covmat_init(&_md->res,_b_sz*_b_sz);
}
/*Release the covariance accumulators owned by _md (inverse of mode_data_init).*/
void mode_data_clear(mode_data *_md){
  od_covmat_clear(&_md->ref);
  od_covmat_clear(&_md->res);
}
/*Reset all statistics to zero without freeing the covariance storage.*/
void mode_data_reset(mode_data *_md){
  int i;
  _md->n=0;
  _md->mean=0;
  _md->var=0;
  for(i=0;i<B_SZ_MAX*B_SZ_MAX;i++){
    _md->satd_avg[i]=0;
  }
  od_covmat_reset(&_md->ref);
  od_covmat_reset(&_md->res);
}
/*Fold one _b_sz x _b_sz input block into the running input statistics.
  Uses Welford's online update: mean is the running average of the scaled
  pixels and var accumulates the sum of squared deviations (normalized later
  by mode_data_correct()).*/
void mode_data_add_input(mode_data *_md,const unsigned char *_data,int _stride,
 int _b_sz){
  int n;
  int i;
  int j;
  /*Total number of pixel samples folded in so far.*/
  n=_md->n*_b_sz*_b_sz;
  for(j=0;j<_b_sz;j++){
    for(i=0;i<_b_sz;i++){
      double delta;
      double s;
      n++;
      s=1.0/n;
      delta=_data[_stride*j+i]*INPUT_SCALE-_md->mean;
      _md->mean+=delta*s;
      _md->var+=delta*delta*(n-1)*s;
    }
  }
  _md->n++;
}
/*Fold one B_SZ x B_SZ coefficient block into either the reference (_ref
  nonzero) or residual covariance accumulator.  Does not touch n/mean/var.*/
void mode_data_add_block(mode_data *_md,const od_coeff *_block,int _stride,
 int _ref){
  int j;
  int i;
  double buf[B_SZ*B_SZ];
  /*Densify the strided block into a contiguous double buffer.*/
  for(j=0;j<B_SZ;j++){
    for(i=0;i<B_SZ;i++){
      buf[B_SZ*j+i]=_block[_stride*j+i];
    }
  }
  if(_ref){
    od_covmat_add(&_md->ref,buf,1);
  }
  else{
    od_covmat_add(&_md->res,buf,1);
  }
}
/*Merge the statistics of _b into _a (parallel-reduction combine).*/
void mode_data_combine(mode_data *_a,const mode_data *_b){
  double s;
  double delta;
  int i;
  if(_b->n==0){
    return;
  }
  /*Weight of _b's contribution: nb/(na+nb).*/
  s=((double)_b->n)/(_a->n+_b->n);
  delta=_b->mean-_a->mean;
  _a->mean+=delta*s;
  for(i=0;i<B_SZ_MAX*B_SZ_MAX;i++){
    _a->satd_avg[i]+=(_b->satd_avg[i]-_a->satd_avg[i])*s;
  }
  s*=_a->n;
  /*NOTE(review): Chan et al.'s formula for combining sums of squared
     deviations is M2a+M2b+delta*delta*na*nb/(na+nb); the delta*s term here
     is linear in delta (and n counts blocks rather than pixel samples), so
     the combined var looks suspect -- confirm against upstream before
     relying on var after a combine.  A proper fix needs the block size,
     which this function does not receive.*/
  _a->var+=_b->var+delta*s;
  od_covmat_combine(&_a->ref,&_b->ref);
  od_covmat_combine(&_a->res,&_b->res);
  _a->n+=_b->n;
}
/*Finalize the statistics: convert var from a sum of squared deviations into
  a per-sample variance, and normalize the covariance accumulators.*/
void mode_data_correct(mode_data *_md,int _b_sz){
  _md->var/=_md->n*_b_sz*_b_sz;
  od_covmat_correct(&_md->ref);
  od_covmat_correct(&_md->res);
}
/*Print a one-line summary of the collected statistics: block count, scaled
  average SATD, estimated bits (Laplacian model), input mean/variance, coding
  gain for reference and residual, and the prediction gain Pg=CgRes-CgRef.
  Both coding gains start from the same 10*log10(var) input-energy baseline
  and subtract the per-coefficient log energies along the covariance diagonal.*/
void mode_data_print(mode_data *_md,const char *_label,double *_scale,
 int _b_sz){
  double cg_ref;
  double cg_res;
  int v;
  int u;
  double satd_avg;
  double bits_avg;
  cg_ref=10*log10(_md->var);
  cg_res=10*log10(_md->var);
  satd_avg=0;
  bits_avg=0;
  for(v=0;v<_b_sz;v++){
    for(u=0;u<_b_sz;u++){
      int i;
      int ii;
      double b;
      i=_b_sz*v+u;
      /*ii indexes the diagonal entry for coefficient i.*/
      ii=_b_sz*_b_sz*i+i;
      cg_ref-=10*log10(_md->ref.cov[ii]*_scale[v]*_scale[u])/(_b_sz*_b_sz);
      cg_res-=10*log10(_md->res.cov[ii]*_scale[v]*_scale[u])/(_b_sz*_b_sz);
      satd_avg+=sqrt(_scale[v]*_scale[u])*_md->satd_avg[i];
      /*Laplacian parameter estimated from the residual variance.*/
      b=sqrt(_scale[v]*_scale[u]*_md->res.cov[ii]/2);
      bits_avg+=1+OD_LOG2(b)+M_LOG2E/b*_md->satd_avg[i];
    }
  }
  printf("%s Blocks %5i SATD %G Bits %G Mean %G Var %G CgRef %G CgRes %G Pg %G\n",
   _label,_md->n,satd_avg,bits_avg,_md->mean,_md->var,cg_ref,cg_res,cg_res-cg_ref);
}
/*Extract the per-coefficient Laplacian scale parameters b[i] from the
  residual covariance diagonal (same model as mode_data_print).*/
void mode_data_params(mode_data *_this,double _b[B_SZ*B_SZ],double *_scale){
  int v;
  int u;
  int i;
  int ii;
  for(v=0;v<B_SZ;v++){
    for(u=0;u<B_SZ;u++){
      i=(v*B_SZ+u);
      ii=B_SZ*B_SZ*i+i;
      _b[i]=sqrt(_scale[v]*_scale[u]*_this->res.cov[ii]/2);
    }
  }
}
/*Initialize intra-prediction statistics: one pooled accumulator (fr) plus
  one accumulator per intra mode, all sized for (1<<_b_sz_log) blocks.*/
void intra_stats_init(intra_stats *_this,int _b_sz_log){
  int mode;
  _this->b_sz_log=_b_sz_log;
  mode_data_init(&_this->fr,1<<_b_sz_log);
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    mode_data_init(&_this->md[mode],1<<_b_sz_log);
  }
}
/*Release all per-mode and pooled accumulators (inverse of intra_stats_init).*/
void intra_stats_clear(intra_stats *_this){
  int i;
  mode_data_clear(&_this->fr);
  for(i=0;i<OD_INTRA_NMODES;i++){
    mode_data_clear(&_this->md[i]);
  }
}
/*Zero all per-mode and pooled statistics without freeing storage.*/
void intra_stats_reset(intra_stats *_this){
  int i;
  mode_data_reset(&_this->fr);
  for(i=0;i<OD_INTRA_NMODES;i++){
    mode_data_reset(&_this->md[i]);
  }
}
/*Fold one block's data into the statistics of its coded mode and into the
  pooled (all-mode) statistics.
  _data: input pixels (for the mean/variance update), stride _stride.
  _ref:  reference (predicted) coefficients, stride _ref_stride.
  _res:  prediction residual, stride _res_stride.*/
void intra_stats_update(intra_stats *_this,const unsigned char *_data,
 int _stride,int _mode,const od_coeff *_ref,int _ref_stride,
 const double *_res,int _res_stride){
  int b_sz;
  mode_data *fr;
  mode_data *md;
  int j;
  int i;
  double buf[B_SZ_MAX*B_SZ_MAX];
  b_sz=1<<_this->b_sz_log;
  fr=&_this->fr;
  md=&_this->md[_mode];
  /*Update the input mean and variance.*/
  mode_data_add_input(fr,_data,_stride,b_sz);
  mode_data_add_input(md,_data,_stride,b_sz);
  /*Update the reference mean and covariance.*/
  for(j=0;j<b_sz;j++){
    for(i=0;i<b_sz;i++){
      buf[b_sz*j+i]=_ref[_ref_stride*j+i];
    }
  }
  od_covmat_add(&fr->ref,buf,1);
  od_covmat_add(&md->ref,buf,1);
  /*Update the residual mean and covariance.*/
  for(j=0;j<b_sz;j++){
    for(i=0;i<b_sz;i++){
      buf[b_sz*j+i]=_res[_res_stride*j+i];
    }
  }
  od_covmat_add(&fr->res,buf,1);
  od_covmat_add(&md->res,buf,1);
  /*Update the average SATD.
    BUG FIX: buf[] holds doubles, but the old code called abs() -- the *int*
    absolute value -- silently truncating each residual toward zero before
    accumulating.  fabs() keeps the fractional part.*/
  for(j=0;j<b_sz;j++){
    for(i=0;i<b_sz;i++){
      double satd;
      satd=fabs(buf[b_sz*j+i]);
      fr->satd_avg[b_sz*j+i]+=(satd-fr->satd_avg[b_sz*j+i])/fr->n;
      md->satd_avg[b_sz*j+i]+=(satd-md->satd_avg[b_sz*j+i])/md->n;
    }
  }
}
/*Finalize every accumulator (normalize variances/covariances) prior to
  printing or parameter extraction.*/
void intra_stats_correct(intra_stats *_this){
  int mode;
  mode_data_correct(&_this->fr,1<<_this->b_sz_log);
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    mode_data_correct(&_this->md[mode],1<<_this->b_sz_log);
  }
}
/*Print the per-mode statistic lines followed by the pooled statistics.*/
void intra_stats_print(intra_stats *_this,const char *_label,
 double *_scale){
  int mode;
  printf("%s\n",_label);
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    char label[16];
    /*Bounded write: snprintf cannot overflow label even if the mode count
       grows (the previous sprintf had no bound).*/
    snprintf(label,sizeof(label),"Mode %i",mode);
    mode_data_print(&_this->md[mode],label,_scale,1<<_this->b_sz_log);
  }
  mode_data_print(&_this->fr,"Pooled",_scale,1<<_this->b_sz_log);
}
/*Merge another thread's/pass's statistics into _this, mode by mode.*/
void intra_stats_combine(intra_stats *_this,const intra_stats *_that){
  int mode;
  mode_data_combine(&_this->fr,&_that->fr);
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    mode_data_combine(&_this->md[mode],&_that->md[mode]);
  }
}
/* compute the scale factors for the DCT and TDLT transforms */
double VP8_SCALE[OD_NBSIZES][B_SZ_MAX];
double OD_SCALE[OD_NBSIZES][B_SZ_MAX];
#define SCALE_BITS (14)
/*Compute, for each 1-D DCT coefficient index, the squared L2 norm (gain) of
  its inverse-transform impulse response.  These gains weight the SATD and
  coding-gain computations for the DCT-only (VP8) transform path.*/
void vp8_scale_init(double *_vp8_scale,int _b_sz_log){
  int b_sz;
  int j;
  int i;
  od_coeff buf[B_SZ_MAX];
  b_sz=1<<_b_sz_log;
  for(i=0;i<b_sz;i++){
    /*Feed a unit impulse (scaled up for fixed-point headroom) through the
       inverse DCT.*/
    for(j=0;j<b_sz;j++){
      buf[j]=i!=j?0:(1<<SCALE_BITS);
    }
    (*OD_IDCT_1D[_b_sz_log-OD_LOG_BSIZE0])(buf,1,buf);
    _vp8_scale[i]=0;
    for(j=0;j<b_sz;j++){
      double c=((double)buf[j])/(1<<SCALE_BITS);
      _vp8_scale[i]+=c*c;
    }
#if PRINT_SCALE
    printf("%s%- 24.18G",i==0?"":" ",_vp8_scale[i]);
#endif
  }
#if PRINT_SCALE
  printf("\n");
#endif
}
#define APPLY_PREFILTER (1)
#define APPLY_POSTFILTER (1)
/*Same as vp8_scale_init(), but for the lapped transform: the impulse is
  placed in the center of a double-width buffer, inverse-transformed, and run
  through the post-filter before measuring its energy.*/
void od_scale_init(double *_od_scale,int _b_sz_log){
  int b_sz;
  int i;
  int j;
  od_coeff buf[2*B_SZ_MAX];
  b_sz=1<<_b_sz_log;
  for(i=0;i<b_sz;i++){
    /*Unit impulse at coefficient i, offset by half a block into the
       double-width buffer so the post-filter has context on both sides.*/
    for(j=0;j<2*b_sz;j++){
      buf[j]=(b_sz>>1)+i!=j?0:(1<<SCALE_BITS);
    }
    (*OD_IDCT_1D[_b_sz_log-OD_LOG_BSIZE0])(&buf[b_sz>>1],1,&buf[b_sz>>1]);
#if APPLY_POSTFILTER
    (*NE_POST_FILTER[_b_sz_log-OD_LOG_BSIZE0])(buf,buf);
    (*NE_POST_FILTER[_b_sz_log-OD_LOG_BSIZE0])(&buf[b_sz],&buf[b_sz]);
#endif
    _od_scale[i]=0;
    for(j=0;j<2*b_sz;j++){
      double c=((double)buf[j])/(1<<SCALE_BITS);
      _od_scale[i]+=c*c;
    }
#if PRINT_SCALE
    printf("%s%- 24.18G",i==0?"":" ",_od_scale[i]);
#endif
  }
#if PRINT_SCALE
  printf("\n");
#endif
}
#define SCALE_SATD (1)
/*Find the best VP8 intra mode for the block at _data by exhaustively coding
  each mode's prediction residual with the DCT and measuring (scaled) SATD.
  On return *_weight (if non-NULL) holds the SATD margin between the best and
  second-best mode (1 for mode 0), usable as a training weight.*/
int vp8_select_mode(const unsigned char *_data,int _stride,double *_weight){
  int best_mode;
  double best_satd;
  double next_best_satd;
  double *vp8_scale;
  int mode;
  best_mode=0;
  best_satd=UINT_MAX;
  next_best_satd=best_satd;
  vp8_scale=VP8_SCALE[B_SZ_LOG-OD_LOG_BSIZE0];
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    unsigned char block[B_SZ*B_SZ];
    od_coeff buf[B_SZ*B_SZ];
    int j;
    int i;
    double satd;
    memset(block,0,B_SZ*B_SZ);
    vp8_intra_predict(block,B_SZ,_data,_stride,mode);
    /*Residual = prediction - input.*/
    for(j=0;j<B_SZ;j++){
      for(i=0;i<B_SZ;i++){
        buf[B_SZ*j+i]=block[B_SZ*j+i]-_data[_stride*j+i];
      }
    }
#if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
    (*OD_FDCT_2D[B_SZ_LOG-OD_LOG_BSIZE0])(buf,B_SZ,buf,B_SZ);
#else
# error "Need an fDCT implementation for this block size."
#endif
    satd=0;
    for(j=0;j<B_SZ;j++){
      for(i=0;i<B_SZ;i++){
#if SCALE_SATD
        /*buf holds integer od_coeff values here, so int abs() is fine.*/
        satd+=sqrt(vp8_scale[j]*vp8_scale[i])*abs(buf[B_SZ*j+i]);
#else
        satd+=abs(buf[B_SZ*j+i]);
#endif
      }
    }
    if(satd<best_satd){
      next_best_satd=best_satd;
      best_satd=satd;
      best_mode=mode;
    }
    else{
      if(satd<next_best_satd){
        next_best_satd=satd;
      }
    }
  }
  if(_weight!=NULL){
    *_weight=best_mode!=0?next_best_satd-best_satd:1;
  }
  return best_mode;
}
/*Select the intra mode minimizing an estimated bit cost under a per-mode,
  per-coefficient Laplacian model with scale parameters _b.  The block's own
  coefficients live in the 5th sub-block (offset 4*B_SZ*B_SZ) of _block.
  *_weight (if non-NULL) receives the bits margin to the runner-up mode.*/
int od_select_mode_bits(const od_coeff *_block,double *_weight,
 double _b[OD_INTRA_NMODES][B_SZ*B_SZ]){
  const od_coeff *c;
  int best_mode;
  double best_bits;
  double next_best_bits;
  double *od_scale;
  int mode;
  c=_block+4*B_SZ*B_SZ;
  best_mode=0;
  best_bits=UINT_MAX;
  next_best_bits=best_bits;
  od_scale=OD_SCALE[B_SZ_LOG-OD_LOG_BSIZE0];
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    double p[B_SZ*B_SZ];
    double bits;
    int j;
    int i;
#if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
#if 0
    (*OD_INTRA_MULT[B_SZ_LOG-OD_LOG_BSIZE0])(p,_block,_stride,mode);
#else
    (*NE_INTRA_MULT[B_SZ_LOG-OD_LOG_BSIZE0])(p,B_SZ,_block,mode);
#endif
#else
# error "Need a predictor implementation for this block size."
#endif
    bits=0;
    for(j=0;j<B_SZ;j++){
      for(i=0;i<B_SZ;i++){
        double res;
        /*Scaled magnitude of the residual after rounding the prediction to
           the nearest integer coefficient.*/
        res=sqrt(od_scale[j]*od_scale[i])*
         abs(c[B_SZ*j+i]-(od_coeff)floor(p[B_SZ*j+i]+0.5));
        /*1 sign bit + log2 of the Laplacian normalizer + expected magnitude
           cost res/(b*ln 2).*/
        bits+=1+OD_LOG2(_b[mode][j*B_SZ+i])+M_LOG2E/_b[mode][j*B_SZ+i]*res;
      }
    }
    if(bits<best_bits){
      next_best_bits=best_bits;
      best_bits=bits;
      best_mode=mode;
    }
    else{
      if(bits<next_best_bits){
        next_best_bits=bits;
      }
    }
  }
  if(_weight!=NULL){
    *_weight=best_mode!=0?next_best_bits-best_bits:1;
  }
  return best_mode;
}
/*Select the intra mode minimizing (scaled) SATD of the prediction residual.
  As in od_select_mode_bits(), the block's coefficients are the 5th sub-block
  of _block, and *_weight (if non-NULL) receives the margin to the runner-up.*/
int od_select_mode_satd(const od_coeff *_block,double *_weight,int _b_sz_log){
  int b_sz;
  const od_coeff *c;
  int best_mode;
  double best_satd;
  double next_best_satd;
  double *od_scale;
  int mode;
  b_sz=1<<_b_sz_log;
  c=_block+4*b_sz*b_sz;
  best_mode=0;
  best_satd=UINT_MAX;
  next_best_satd=best_satd;
  od_scale=OD_SCALE[_b_sz_log-OD_LOG_BSIZE0];
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    double p[B_SZ_MAX*B_SZ_MAX];
    double satd;
    int j;
    int i;
    (*NE_INTRA_MULT[_b_sz_log-OD_LOG_BSIZE0])(p,b_sz,_block,mode);
    satd=0;
    for(j=0;j<b_sz;j++){
      for(i=0;i<b_sz;i++){
        /*The argument to abs() is an integer difference (the prediction is
           rounded to od_coeff first), so int abs() is correct here.*/
#if SCALE_SATD
        satd+=sqrt(od_scale[j]*od_scale[i])*
         abs(c[b_sz*j+i]-(od_coeff)floor(p[b_sz*j+i]+0.5));
#else
        satd+=abs(c[b_sz*j+i]-(od_coeff)floor(p[b_sz*j+i]+0.5));
#endif
      }
    }
    if(satd<best_satd){
      next_best_satd=best_satd;
      best_mode=mode;
      best_satd=satd;
    }
    else{
      if(satd<next_best_satd){
        next_best_satd=satd;
      }
    }
  }
  if(_weight!=NULL){
    *_weight=best_mode!=0?next_best_satd-best_satd:1;
  }
  return best_mode;
}
/*Drive a set of per-block analysis functions over every video named on the
  command line (one input file per OpenMP task).  For each selected plane,
  _start is called once, each function in _funcs is applied to every aligned
  B_SZ block of the first frame, and _finish is called once.  Each thread
  gets a private _ctx_sz-byte slice of _ctx.*/
int ne_apply_to_blocks(void *_ctx,int _ctx_sz,int _plmask,int _padding,
 plane_start_func _start,int _nfuncs,const block_func *_funcs,
 plane_finish_func _finish,int _argc,const char *_argv[]){
  int ai;
#pragma omp parallel for schedule(dynamic)
  for(ai=1;ai<_argc;ai++){
    FILE *fin;
    video_input vid;
    video_input_info info;
    video_input_ycbcr ycbcr;
    int pli;
    int tid;
    unsigned char *ctx;
    fin=fopen(_argv[ai],"rb");
    if(fin==NULL){
      fprintf(stderr,"Could not open '%s' for reading.\n",_argv[ai]);
      continue;
    }
    /*NOTE(review): if video_input_open() fails it is unclear whether it
       closes fin; if not, this path (and the fetch-frame failure below)
       leaks the FILE* -- confirm the video_input API's ownership rules.*/
    if(video_input_open(&vid,fin)<0){
      fprintf(stderr,"Error reading video info from '%s'.\n",_argv[ai]);
      continue;
    }
    video_input_get_info(&vid,&info);
    if(video_input_fetch_frame(&vid,ycbcr,NULL)<0){
      fprintf(stderr,"Error reading first frame from '%s'.\n",_argv[ai]);
      continue;
    }
    tid=OD_OMP_GET_THREAD;
    ctx=((unsigned char *)_ctx)+tid*_ctx_sz;
    for(pli=0;pli<3;pli++){
      if(_plmask&1<<pli){
        int x0;
        int y0;
        int nxblocks;
        int nyblocks;
        /*Compute the aligned block grid for this plane, honoring _padding.*/
        get_intra_dims(&info,pli,_padding,&x0,&y0,&nxblocks,&nyblocks);
        if(_start!=NULL){
          (*_start)(ctx,_argv[ai],&info,pli,nxblocks,nyblocks);
        }
        if(_funcs!=NULL){
          int f;
          for(f=0;f<_nfuncs;f++){
            if(_funcs[f]!=NULL){
              const unsigned char *data;
              int stride;
              int bj;
              int bi;
              data=ycbcr[pli].data;
              stride=ycbcr[pli].stride;
              for(bj=0;bj<nyblocks;bj++){
                int y;
                y=y0+B_SZ*bj;
                for(bi=0;bi<nxblocks;bi++){
                  int x;
                  x=x0+B_SZ*bi;
                  (*_funcs[f])(ctx,&data[stride*y+x],stride,bi,bj);
                }
              }
            }
          }
        }
        if(_finish!=NULL){
          (*_finish)(ctx);
        }
      }
    }
    video_input_close(&vid);
  }
  return EXIT_SUCCESS;
}
|
omp_task_imp_shared.c | <ompts:test>
<ompts:testdescription> Test to see if implied shared works correctly</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp task</ompts:directive>
<ompts:dependences>omp single, omp task firstprivate</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/* Utility function do spend some time in a loop */
/* Verifies that a variable that is shared in the enclosing parallel region
   is implicitly shared inside an untied task as well: every task increments
   the same i, so the final count must equal NUM_TASKS. */
int <ompts:testcode:functionname>omp_task_imp_shared</ompts:testcode:functionname> (FILE * logFile)
{
  int i = 0;
  int k = 0;
  int result = 0;
  #pragma omp parallel
  {
    #pragma omp single
    for (k = 0; k < NUM_TASKS; k++)
    {
      /* The crosscheck variant adds firstprivate(i), giving each task a
         private copy; that should make the final count come out wrong. */
      #pragma omp task <ompts:crosscheck> firstprivate(i) </ompts:crosscheck>
      {
        #pragma omp atomic
        i++;
        /* this should be shared implicitly */
      }
    }
  }
  result = i;
  return (result == NUM_TASKS);
}
</ompts:testcode>
</ompts:test>
|
GB_unaryop__abs_fp64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_uint16
// op(A') function: GB_tran__abs_fp64_uint16
// C type: double
// A type: uint16_t
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
// Type and operator plumbing consumed by the kernels below and by the
// included GB_unaryop_transpose.c template.
// A (input) entry type
#define GB_ATYPE \
    uint16_t
// C (output) entry type
#define GB_CTYPE \
    double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]
// the pC-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = fabs ((double) Ax [p]) to all anz entries, split statically
// across nthreads.  Returns GrB_NO_VALUE when this kernel is compiled out
// via GB_DISABLE.
GrB_Info GB_unop__abs_fp64_uint16
(
    double *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying the same typecast + fabs.
// The actual loop lives in the included GB_unaryop_transpose.c template
// (phase 2 of 2), parameterized by the GB_* macros above.
GrB_Info GB_tran__abs_fp64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
Repulsive_forces.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
/* Return the current wall-clock time in seconds (microsecond resolution). */
double get_walltime() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) (now.tv_sec + now.tv_usec * 1e-6);
}
void force_repulsion(int np, const double *pos, double L, double krepulsion, double *forces) {
int i, j;
double posi [3]; double rvec [3];
double s2, s, f;
// initialize forces to zero
for (i = 0; i < 3 * np; i++) {
forces[i] = 0.;
}
// loop over all pairs
for (i = 0; i < np; i++) {
posi[0] = pos[3 * i];
posi[1] = pos[3 * i + 1];
posi[2] = pos[3 * i + 2];
for (j = i + 1; j < np; j++) {
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3 * j], L);
rvec[1] = remainder(posi[1] - pos[3 * j + 1], L);
rvec[2] = remainder(posi[2] - pos[3 * j + 2], L);
s2 = rvec [0]* rvec [0] + rvec [1]* rvec [1] + rvec [2]* rvec [2];
if (s2 < 4) {
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2. - s);
forces[3 * i] += f * rvec[0];
forces[3 * i + 1] += f * rvec[1];
forces[3 * i + 2] += f * rvec[2];
forces[3 * j] += -f * rvec[0];
forces[3 * j + 1] += -f * rvec[1];
forces[3 * j + 2] += -f * rvec[2];
}
}
}
}
void force_repulsion_Parallel(int np, const double *pos, double L, double krepulsion, double *forces) {
int i, j;
double posi [3]; double rvec [3];
double s2, s, f;
omp_set_num_threads(omp_get_num_threads());
// initialize forces to zero
#pragma omp parallel for schedule(static) private (i)
for (i = 0; i < 3 * np; i++) {
forces[i] = 0.;
}
// loop over all pairs
#pragma omp parallel for schedule(static) private (i, j, rvec, posi, s2, s, f) shared(np, L , krepulsion, pos, forces)
for (i = 0; i < np; i++) {
posi[0] = pos[3 * i];
posi[1] = pos[3 * i + 1];
posi[2] = pos[3 * i + 2];
for (j = i + 1; j < np; j++) {
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3 * j], L);
rvec[1] = remainder(posi[1] - pos[3 * j + 1], L);
rvec[2] = remainder(posi[2] - pos[3 * j + 2], L);
s2 = rvec[0] * rvec[0] + rvec[1] * rvec[1] + rvec[2] * rvec[2];
if (s2 < 4) {
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2. - s);
#pragma omp critical(forces)
{
forces[3 * i] += f * rvec[0];
forces[3 * i + 1] += f * rvec[1];
forces[3 * i + 2] += f * rvec[2];
forces[3 * j] += -f * rvec[0];
forces[3 * j + 1] += -f * rvec[1];
forces[3 * j + 2] += -f * rvec[2];
}
}
}
}
}
// Compare two force arrays of 3*np components; return 1 if every component
// agrees to within epsilon, 0 otherwise.
int comparator(double *forces1, double *forces2, int np, double epsilon) {
    for(int i = 0; i < 3 * np; i++) {
        // bug fix: the original referenced the undeclared identifier
        // "epsilion" (a typo for the epsilon parameter), which does not compile
        if(fabs(forces1[i] - forces2[i]) > epsilon) {
            return 0;
        }
    }
    return 1;
}
// Driver: build a random particle configuration, compute the repulsive
// forces with both the sequential and the OpenMP kernel, time each, and
// verify the two force arrays agree to within epsilon.
int main(int argc, char *argv[]) {
    int i;
    int np = 1000; // default number of particles
    double phi = 0.3; // volume fraction
    double krepulsion = 125.; // force constant
    double *pos; double *forces1; double *forces2;
    double L, time0 , time1;
    double epsilon = 1e-10; // significance value to compare the forces.
    if (argc > 1) {
        np = atoi(argv[1]);
        if (np < 1) { // robustness: atoi returns 0 on garbage input
            fprintf(stderr, "invalid particle count '%s'\n", argv[1]);
            return 1;
        }
    }
    // box size such that np unit-radius particles fill volume fraction phi
    L = pow(4./3. * 3.1415926536 * np/phi, 1./3.);
    // generate random particle positions inside simulation box
    forces1 = (double *) malloc(3 * np * sizeof(double));
    forces2 = (double *) malloc(3 * np * sizeof(double));
    pos = (double *) malloc(3 * np * sizeof(double));
    if (forces1 == NULL || forces2 == NULL || pos == NULL) {
        // bug fix: the original used the malloc results unchecked
        fprintf(stderr, "out of memory\n");
        free(forces1); free(forces2); free(pos);
        return 1;
    }
    for (i = 0; i < 3 * np; i++) {
        pos[i] = rand()/(double)RAND_MAX * L;
    }
    // measure sequential execution time of this function
    time0 = get_walltime ();
    force_repulsion(np, pos, L, krepulsion, forces1);
    time1 = get_walltime ();
    printf("number of particles: %d\n", np);
    printf("sequential elapsed time: %f\n", time1 - time0);
    // measure parallel execution time of this function
    time0 = get_walltime ();
    force_repulsion_Parallel(np, pos, L, krepulsion, forces2);
    time1 = get_walltime ();
    printf("parallel elapsed time: %f\n", time1 - time0);
    int result = comparator(forces1, forces2, np, epsilon);
    if(result == 1) {
        printf("forces match\n");
    }
    else {
        printf("forces do not match\n");
    }
    free(forces1);
    free(forces2);
    free(pos);
    return 0;
}
|
simulation.c | #include "tallybench_header.h"
// History-based Monte Carlo tally kernel: each OpenMP thread advances whole
// particle histories independently; the only writes to shared state are the
// atomic tally accumulations below.  Per-particle seeds are derived from the
// particle index, so results are reproducible regardless of thread count.
void run_history_based_simulation(Inputs in, double *** restrict tallies, int * restrict num_nucs, int ** restrict mats, double ** restrict concs, int ** restrict spatial_mats, Reactor_Mesh * restrict RM, unsigned long ** restrict tally_hits)
{
// Particle History Loop
#pragma omp parallel for schedule(static)
for( int p = 0; p < in.particles; p++ )
{
// deterministic per-particle RNG seed (independent of scheduling)
unsigned long seed = ((unsigned long) p+ (unsigned long)1)* (unsigned long) 13371337;
double weight = 1.0;
// Tally Loop
for( int e = 0; e < in.events_per_particle; e++ )
{
// Sample Cartesian location randomly from somewhere inside reactor mesh
Coord location = sample_random_location( RM, &seed );
// Determine which assembly it is in
int assembly = find_assembly_id( RM, location );
// Determine which pin it is in
int pin = find_pin_id( RM, assembly, location );
// Determine which axial location of the pin it is in, and therefore
// which final tally bin should be accessed
int axial_id = find_axial_id(RM, location);
int bin = pin * RM->axial_regions + axial_id;
// Determine which material it is in (determined by pin)
int mat = RM->assemblies[assembly].material_ids[0][pin];
// Pick phi
double phi = rn(&seed);
// Score Accumulator
double total_score = 0.0;
// Nuclides Loop
for( int n = 0; n < num_nucs[mat] ; n++ )
{
// Find isotope index
int idx = mats[mat][n];
// TODO: look this up based on material and nuclide (?)
// This is covered by XSbench & RSBench (micro_xs would be cached)
// In OpenMC, micro_xs is a global array (of length n_nuclides) that is
// unique to each thread (i.e., threadprivate). In the event based model,
// this would have to be changed so that the cache was stored along
// with the particle. The data is stored as an array of structs.
//
// SO: in the history based model, I would just
//
double micro_xs = rn(&seed);
// Look up nuclide density in material
double rho = concs[mat][n];
// Compute Score
// In OpenMC, this is computed as: micro_xs[nuclide_id].total * atom_density * flux
// with flux = particle_weight * distance_travelled
double score = micro_xs * rho * phi * weight;
//printf("micro_xs = %lf rho = %lf phi = %lf weight = %lf\n", micro_xs, rho, phi, weight);
//printf("tallying score = %lf\n", score);
// Normalize Score by number of tallies
//score /= in.total_tallies;
// Tally score to global array (shared across threads, hence atomic)
#pragma omp atomic
tallies[assembly][bin][idx] += score;
// Accumulate score
total_score += score;
}
#ifdef TEST
#pragma omp atomic
tally_hits[assembly][bin]++;
#endif
// Reduce particle weight based on score
// NOTE(review): if num_nucs[mat] == 0, total_score stays 0.0 and weight
// becomes inf here -- presumably inputs guarantee >= 1 nuclide; confirm.
weight *= 1.0/total_score;
}
}
}
|
BinaryLutN.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <array>
#include <vector>
#include "bb/BinaryLutModel.h"
namespace bb {
// Fixed-table-size LUT layer: every output node is an N-input binary
// look-up table (a truth table of 2^N bits).  FT is the frame data type
// (Bit by default), BT the backward data type (unused: Backward is a no-op).
template <int N = 6, typename FT = Bit, typename BT = float>
class BinaryLutN : public BinaryLutModel
{
using _super = BinaryLutModel;
public:
static inline std::string ClassName(void) { return "BinaryLut" + std::to_string(N); }
static inline std::string ObjectName(void){ return ClassName() + "_" + DataType<FT>::Name() + "_" + DataType<BT>::Name(); }
std::string GetModelName(void) const override { return ClassName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
bool m_host_only = false; // when true, never take the CUDA path
bool m_host_simd = true; // when true, use the AVX2 path for N==6 / Bit
std::string m_connection; // input-connection policy string
indices_t m_input_shape;
indices_t m_output_shape;
// table geometry: 2^N truth-table bits per node, packed into 32-bit words
static int const m_table_size = (1 << N);
static int const m_table_bits = sizeof(std::int32_t) * 8;
static int const m_table_unit = (m_table_size + (m_table_bits - 1)) / m_table_bits;
Tensor_<std::int32_t> m_table; // packed truth tables, one row per node
Tensor_<std::int32_t> m_input_index; // (node, i) -> index of i-th input node
std::mt19937_64 m_mt; // RNG for connection/table initialization
public:
// construction parameters
struct create_t
{
indices_t output_shape;
std::string connection="";
std::uint64_t seed = 1;
};
protected:
BinaryLutN(create_t const &create)
{
BB_ASSERT(!create.output_shape.empty());
m_mt.seed(create.seed);
m_output_shape = create.output_shape;
m_connection = create.connection;
m_input_index.Resize(CalcShapeSize(m_output_shape), (index_t)N);
m_table.Resize(CalcShapeSize(m_output_shape), (index_t)m_table_unit);
}
// runtime commands: "host_only <bool>" / "host_simd <bool>"
void CommandProc(std::vector<std::string> args) override
{
// host-only mode setting
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
// host SIMD mode setting
if (args.size() == 2 && args[0] == "host_simd")
{
m_host_simd = EvalBool(args[1]);
}
}
public:
~BinaryLutN() {}
static std::shared_ptr<BinaryLutN> Create(create_t const &create)
{
return std::shared_ptr<BinaryLutN>(new BinaryLutN(create));
}
static std::shared_ptr<BinaryLutN> Create(indices_t const &output_shape, std::uint64_t seed = 1)
{
create_t create;
create.output_shape = output_shape;
create.seed = seed;
return Create(create);
}
static std::shared_ptr<BinaryLutN> Create(index_t output_node_size, std::uint64_t seed = 1)
{
create_t create;
create.output_shape.resize(1);
create.output_shape[0] = output_node_size;
create.seed = seed;
return Create(create);
}
static std::shared_ptr<BinaryLutN> Create(void)
{
return Create(create_t());
}
#ifdef BB_PYBIND11 // for Python bindings
static std::shared_ptr<BinaryLutN> CreatePy(
indices_t output_shape,
std::string connection="",
std::uint64_t seed = 1)
{
create_t create;
create.output_shape = output_shape;
create.connection = connection;
create.seed = seed;
return Create(create);
}
#endif
auto lock_InputIndex(void) { return m_input_index.Lock(); }
auto lock_InputIndex_const(void) const { return m_input_index.LockConst(); }
// sparse connection management
index_t GetNodeConnectionSize(index_t node) const override
{
return N;
}
void SetNodeConnectionIndex(index_t node, index_t input_index, index_t input_node) override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(input_index >= 0 && input_index < N);
BB_DEBUG_ASSERT(input_node >= 0 && input_node < GetInputNodeSize());
auto ptr = lock_InputIndex();
ptr(node, input_index) = (std::int32_t)input_node;
}
index_t GetNodeConnectionIndex(index_t node, index_t input_index) const override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(input_index >= 0 && input_index < N);
auto ptr = lock_InputIndex_const();
return (index_t)ptr(node, input_index);
}
// LUT table operations
int GetLutTableSize(index_t node) const
{
return m_table_size;
}
// set one truth-table bit of a node
// NOTE(review): (1 << bit) is a signed shift; bit == 31 shifts into the
// sign bit (UB in strict C++) -- 1u << bit would be cleaner; confirm intent.
void SetLutTable(index_t node, int bitpos, bool value) override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(bitpos >= 0 && bitpos < m_table_size);
int idx = bitpos / m_table_bits;
int bit = bitpos % m_table_bits;
auto ptr = m_table.Lock();
if ( value ) {
ptr(node, idx) |= (1 << bit);
}
else {
ptr(node, idx) &= ~(1 << bit);
}
}
// read one truth-table bit of a node
bool GetLutTable(index_t node, int bitpos) const override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(bitpos >= 0 && bitpos < (1 << N));
int idx = bitpos / m_table_bits;
int bit = bitpos % m_table_bits;
auto ptr = m_table.LockConst();
return ((ptr(node, idx) & (1 << bit)) != 0);
}
/**
 * @brief  set the input shape
 * @details stores the shape and (re)initializes connections and LUT tables
 * @param  shape  new input shape
 * @return the output shape
 */
indices_t SetInputShape(indices_t shape) override
{
// nothing to do if the shape is unchanged
if ( shape == this->GetInputShape() ) {
return this->GetOutputShape();
}
// store the new shape
m_input_shape = shape;
// initialize input connections
this->InitializeNodeInput(m_mt(), m_connection);
// initialize LUT tables
this->InitializeLutTable(m_mt());
return m_output_shape;
}
/**
 * @brief  set the output shape
 * @details any shape is accepted as long as the total output node count
 *          is unchanged
 * @param  shape  new output shape
 */
void SetOutputShape(indices_t const &shape)
{
BB_ASSERT(CalcShapeSize(shape) == this->m_output_node_size);
m_output_shape = shape;
}
/**
 * @brief  get the input shape
 * @return the input shape
 */
indices_t GetInputShape(void) const override
{
return m_input_shape;
}
/**
 * @brief  get the output shape
 * @return the output shape
 */
indices_t GetOutputShape(void) const override
{
return m_output_shape;
}
private:
// AND (or ANDNOT, per LUT entry bit VAL) one input lane into the LUT mask
template<int LUT, int VAL>
inline __m256i lut_mask_unit(__m256i& val, __m256i& lut)
{
if ((LUT & (1 << VAL)) == 0) {
return _mm256_andnot_si256(val, lut);
}
else {
return _mm256_and_si256(val, lut);
}
}
// OR into msk the lanes where the 6 inputs select truth-table entry LUT
template<int LUT>
inline void lut6_mask(__m256i& msk, __m256i lut, __m256i val[6])
{
lut = lut_mask_unit<LUT, 0>(val[0], lut);
lut = lut_mask_unit<LUT, 1>(val[1], lut);
lut = lut_mask_unit<LUT, 2>(val[2], lut);
lut = lut_mask_unit<LUT, 3>(val[3], lut);
lut = lut_mask_unit<LUT, 4>(val[4], lut);
lut = lut_mask_unit<LUT, 5>(val[5], lut);
msk = _mm256_or_si256(msk, lut);
}
// read a packed truth-table bit through an already-held lock
inline bool GetLutTableFromPtr(Tensor_<std::int32_t>::ConstPtr ptr, index_t node, int index)
{
auto idx = index / m_table_bits;
auto bit = index % m_table_bits;
return (((ptr(node, idx) >> bit) & 1) != 0);
}
public:
// Forward pass: for each node, gather its N inputs, form the table index,
// and emit the table bit.  Three implementations: CUDA, AVX2, and generic.
FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
BB_ASSERT(x_buf.GetType() == DataType<FT>::type);
// set the input shape on the first call if not configured yet
if (x_buf.GetShape() != m_input_shape) {
SetInputShape(x_buf.GetShape());
}
// allocate the output buffer
FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<FT>::type);
#ifdef BB_WITH_CUDA
if ( N == 6 && DataType<FT>::type == BB_TYPE_BIT && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
auto table_ptr = m_table.LockDeviceMemoryConst();
// (sic: "Binaty" matches the symbol exported by the CUDA library)
bbcu_bit_BinatyLut6_Forward
(
(int const *)x_ptr.GetAddr(),
(int *)y_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int const *)table_ptr.GetAddr(),
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(int))
);
return y_buf;
}
#endif
if ( N == 6 && DataType<FT>::type == BB_TYPE_BIT && m_host_simd ) {
auto x_ptr = x_buf.LockConst<Bit>();
auto y_ptr = y_buf.Lock<Bit>(true);
auto input_index_ptr = m_input_index.LockConst();
auto table_ptr = m_table.LockConst();
index_t node_size = y_buf.GetNodeSize();
index_t frame_size = y_buf.GetFrameStride() / sizeof(__m256i);
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
__m256i* x_addr[6];
__m256i* y_addr;
__m256i x[6];
x_addr[0] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 0));
x_addr[1] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 1));
x_addr[2] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 2));
x_addr[3] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 3));
x_addr[4] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 4));
x_addr[5] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 5));
y_addr = (__m256i*)y_ptr.GetAddr(node);
// expand the 64 packed table bits into byte masks (-1 / 0)
// NOTE(review): (1 << i) with i == 31 shifts into the sign bit of a
// signed int (UB in strict C++); 1u << i would be safer -- confirm.
char table[64];
std::int32_t t0 = table_ptr(node, 0);
std::int32_t t1 = table_ptr(node, 1);
for (int i = 0; i < 32; ++i) { table[i] = (t0 & (1 << i)) ? -1 : 0; }
for (int i = 0; i < 32; ++i) { table[i+32] = (t1 & (1 << i)) ? -1 : 0; }
for (index_t frame = 0; frame < frame_size; ++frame) {
// input
x[0] = _mm256_loadu_si256(&x_addr[0][frame]);
x[1] = _mm256_loadu_si256(&x_addr[1][frame]);
x[2] = _mm256_loadu_si256(&x_addr[2][frame]);
x[3] = _mm256_loadu_si256(&x_addr[3][frame]);
x[4] = _mm256_loadu_si256(&x_addr[4][frame]);
x[5] = _mm256_loadu_si256(&x_addr[5][frame]);
// LUT: OR together the contribution of all 64 truth-table entries
__m256i y = _mm256_set1_epi8(0);
lut6_mask< 0>(y, _mm256_set1_epi8(table[0]), x);
lut6_mask< 1>(y, _mm256_set1_epi8(table[1]), x);
lut6_mask< 2>(y, _mm256_set1_epi8(table[2]), x);
lut6_mask< 3>(y, _mm256_set1_epi8(table[3]), x);
lut6_mask< 4>(y, _mm256_set1_epi8(table[4]), x);
lut6_mask< 5>(y, _mm256_set1_epi8(table[5]), x);
lut6_mask< 6>(y, _mm256_set1_epi8(table[6]), x);
lut6_mask< 7>(y, _mm256_set1_epi8(table[7]), x);
lut6_mask< 8>(y, _mm256_set1_epi8(table[8]), x);
lut6_mask< 9>(y, _mm256_set1_epi8(table[9]), x);
lut6_mask<10>(y, _mm256_set1_epi8(table[10]), x);
lut6_mask<11>(y, _mm256_set1_epi8(table[11]), x);
lut6_mask<12>(y, _mm256_set1_epi8(table[12]), x);
lut6_mask<13>(y, _mm256_set1_epi8(table[13]), x);
lut6_mask<14>(y, _mm256_set1_epi8(table[14]), x);
lut6_mask<15>(y, _mm256_set1_epi8(table[15]), x);
lut6_mask<16>(y, _mm256_set1_epi8(table[16]), x);
lut6_mask<17>(y, _mm256_set1_epi8(table[17]), x);
lut6_mask<18>(y, _mm256_set1_epi8(table[18]), x);
lut6_mask<19>(y, _mm256_set1_epi8(table[19]), x);
lut6_mask<20>(y, _mm256_set1_epi8(table[20]), x);
lut6_mask<21>(y, _mm256_set1_epi8(table[21]), x);
lut6_mask<22>(y, _mm256_set1_epi8(table[22]), x);
lut6_mask<23>(y, _mm256_set1_epi8(table[23]), x);
lut6_mask<24>(y, _mm256_set1_epi8(table[24]), x);
lut6_mask<25>(y, _mm256_set1_epi8(table[25]), x);
lut6_mask<26>(y, _mm256_set1_epi8(table[26]), x);
lut6_mask<27>(y, _mm256_set1_epi8(table[27]), x);
lut6_mask<28>(y, _mm256_set1_epi8(table[28]), x);
lut6_mask<29>(y, _mm256_set1_epi8(table[29]), x);
lut6_mask<30>(y, _mm256_set1_epi8(table[30]), x);
lut6_mask<31>(y, _mm256_set1_epi8(table[31]), x);
lut6_mask<32>(y, _mm256_set1_epi8(table[32]), x);
lut6_mask<33>(y, _mm256_set1_epi8(table[33]), x);
lut6_mask<34>(y, _mm256_set1_epi8(table[34]), x);
lut6_mask<35>(y, _mm256_set1_epi8(table[35]), x);
lut6_mask<36>(y, _mm256_set1_epi8(table[36]), x);
lut6_mask<37>(y, _mm256_set1_epi8(table[37]), x);
lut6_mask<38>(y, _mm256_set1_epi8(table[38]), x);
lut6_mask<39>(y, _mm256_set1_epi8(table[39]), x);
lut6_mask<40>(y, _mm256_set1_epi8(table[40]), x);
lut6_mask<41>(y, _mm256_set1_epi8(table[41]), x);
lut6_mask<42>(y, _mm256_set1_epi8(table[42]), x);
lut6_mask<43>(y, _mm256_set1_epi8(table[43]), x);
lut6_mask<44>(y, _mm256_set1_epi8(table[44]), x);
lut6_mask<45>(y, _mm256_set1_epi8(table[45]), x);
lut6_mask<46>(y, _mm256_set1_epi8(table[46]), x);
lut6_mask<47>(y, _mm256_set1_epi8(table[47]), x);
lut6_mask<48>(y, _mm256_set1_epi8(table[48]), x);
lut6_mask<49>(y, _mm256_set1_epi8(table[49]), x);
lut6_mask<50>(y, _mm256_set1_epi8(table[50]), x);
lut6_mask<51>(y, _mm256_set1_epi8(table[51]), x);
lut6_mask<52>(y, _mm256_set1_epi8(table[52]), x);
lut6_mask<53>(y, _mm256_set1_epi8(table[53]), x);
lut6_mask<54>(y, _mm256_set1_epi8(table[54]), x);
lut6_mask<55>(y, _mm256_set1_epi8(table[55]), x);
lut6_mask<56>(y, _mm256_set1_epi8(table[56]), x);
lut6_mask<57>(y, _mm256_set1_epi8(table[57]), x);
lut6_mask<58>(y, _mm256_set1_epi8(table[58]), x);
lut6_mask<59>(y, _mm256_set1_epi8(table[59]), x);
lut6_mask<60>(y, _mm256_set1_epi8(table[60]), x);
lut6_mask<61>(y, _mm256_set1_epi8(table[61]), x);
lut6_mask<62>(y, _mm256_set1_epi8(table[62]), x);
lut6_mask<63>(y, _mm256_set1_epi8(table[63]), x);
_mm256_storeu_si256(&y_addr[frame], y);
}
}
return y_buf;
}
{
// generic (scalar) fallback
auto x_ptr = x_buf.LockConst<FT>();
auto y_ptr = y_buf.Lock<FT>();
auto input_index_ptr = m_input_index.LockConst();
auto table_ptr = m_table.LockConst();
index_t frame_size = x_buf.GetFrameSize();
index_t node_size = this->GetOutputNodeSize();
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
for (index_t frame = 0; frame < frame_size; ++frame) {
// build the truth-table index from the N input bits
int index = 0;
int mask = 1;
for (index_t i = 0; i < N; i++) {
index_t input_node = input_index_ptr(node, i);
bool x = (x_ptr.Get(frame, input_node) != 0);
index |= x ? mask : 0;
mask <<= 1;
}
auto y = GetLutTableFromPtr(table_ptr, node, index);
y_ptr.Set(frame, node, y);
}
}
return y_buf;
}
}
// Backward does not exist for a binary LUT (not differentiable); a fresh
// dx buffer is returned without propagating any gradient.
FrameBuffer Backward(FrameBuffer dy_buf) override
{
if (dy_buf.Empty()) {
return dy_buf;
}
FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<BT>::type);
return dx_buf;
}
// serialization
protected:
void DumpObjectData(std::ostream &os) const override
{
// version
std::int64_t ver = 1;
bb::SaveValue(os, ver);
// parent class
_super::DumpObjectData(os);
// members
bb::SaveValue(os, m_host_only);
bb::SaveValue(os, m_host_simd);
bb::SaveValue(os, m_connection);
bb::SaveValue(os, m_input_shape);
bb::SaveValue(os, m_output_shape);
m_table.DumpObject(os);
m_input_index.DumpObject(os);
}
void LoadObjectData(std::istream &is) override
{
// version
std::int64_t ver;
bb::LoadValue(is, ver);
BB_ASSERT(ver == 1);
// parent class
_super::LoadObjectData(is);
// members (must mirror DumpObjectData exactly)
bb::LoadValue(is, m_host_only);
bb::LoadValue(is, m_host_simd);
bb::LoadValue(is, m_connection);
bb::LoadValue(is, m_input_shape);
bb::LoadValue(is, m_output_shape);
m_table.LoadObject(is);
m_input_index.LoadObject(is);
}
};
} |
DRB022-reductionmissing-var-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A kernel for two level parallelizable loop with reduction:
if reduction(+:sum) is missing, there is race condition.
Data race pairs:
sum@72:7 vs. sum@72:7
sum@72:7 vs. sum@72:13
*/
#include <stdio.h>
#include <stdlib.h>
// DataRaceBench DRB022 kernel: two-level parallel loop with a sum reduction.
// The cetus/omp pragmas are auto-generated benchmark content -- they are the
// object under test and are intentionally left untouched.
int main(int argc, char * argv[])
{
int i, j;
float temp, sum = 0.0;
int len = 100;
int _ret_val_0;
if (argc>1)
{
len=atoi(argv[1]);
}
if (len <= 0)
{
len = 100; /* robustness: atoi yields 0 on garbage; reject non-positive */
}
/* bug fix: the VLA was declared while len was still 100, before the
   command-line value was parsed; the loops below iterate to the requested
   len, which wrote out of bounds whenever len > 100.  The array is now
   declared after len is final. */
float u[len][len];
#pragma cetus private(i, j)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<len; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#0#0
#pragma cetus parallel
#pragma omp parallel for private(j)
for (j=0; j<len; j ++ )
{
u[i][j]=0.5;
}
}
#pragma cetus private(i, j, temp)
#pragma loop name main#1
#pragma cetus reduction(+: sum)
#pragma cetus parallel
#pragma omp parallel for private(i, j, temp) reduction(+: sum)
for (i=0; i<len; i ++ )
{
#pragma cetus private(j, temp)
#pragma loop name main#1#0
#pragma cetus reduction(+: sum)
#pragma cetus parallel
#pragma omp parallel for private(j, temp) reduction(+: sum)
for (j=0; j<len; j ++ )
{
temp=u[i][j];
sum=(sum+(temp*temp));
}
}
printf("sum = %f\n", sum);
_ret_val_0=0;
return _ret_val_0;
}
|
p2p.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: Pipeline
PURPOSE: This program tests the efficiency with which point-to-point
synchronization can be carried out. It does so by executing
a pipelined algorithm on an m*n grid. The first array dimension
is distributed among the ranks (stripwise decomposition).
USAGE: The program takes as input the dimensions of the grid, and the
number of times we loop over the grid
p2p <#threads> <# iterations> <m> <n>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than MPI or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
HISTORY: - Written by Rob Van der Wijngaart, March 2006.
- modified by Rob Van der Wijngaart, August 2006:
* changed boundary conditions and stencil computation to avoid
overflow
* introduced multiple iterations over grid and dependency between
iterations
**********************************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_mpiomp.h>
/* define shorthand for flag with cache line padding */
#define LINEWORDS 16
#define flag(TID,j) flag[((TID)+(j)*nthread)*LINEWORDS]
#define ARRAY(i,j) vector[i+1+(j)*(segment_size+1)]
double * RESTRICT vector;/* array holding grid values */
int main(int argc, char ** argv)
{
int my_ID; /* rank */
int TID; /* thread ID */
int root=0, final; /* IDs of root rank and rank that verifies result */
long m, n; /* grid dimensions */
double local_pipeline_time, /* timing parameters */
pipeline_time,
avgtime;
double epsilon = 1.e-8; /* error tolerance */
double corner_val; /* verification value at top right corner of grid */
int i, j, iter, ID;/* dummies */
int iterations; /* number of times to run the pipeline algorithm */
long start, end; /* start and end of grid slice owned by calling rank */
long segment_size;
int *flag; /* used for pairwise synchronizations */
long *tstart, *tend;/* starts and ends of grid slices for respective threads */
long *tsegment_size;
int nthread; /* number of threads */
int error=0; /* error flag */
int Num_procs; /* Number of ranks */
long total_length; /* total required length to store grid values */
int provided; /* MPI level of thread support */
int true, false; /* toggled booleans used for synchronization */
/*********************************************************************************
** Initialize the MPI environment
**********************************************************************************/
int requested = MPI_THREAD_MULTIPLE;
MPI_Init_thread(&argc,&argv, requested, &provided);
MPI_Comm_rank(MPI_COMM_WORLD, &my_ID);
if (my_ID == 0) {
printf("Parallel Research Kernels version %s\n", PRKVERSION);
printf("MPI+OpenMP pipeline execution on 2D grid\n");
}
if (requested<provided) {
if (my_ID==0) printf("ERROR: requested=%s less than provided=%s\n",
PRK_MPI_THREAD_STRING(requested),PRK_MPI_THREAD_STRING(provided));
bail_out(requested-provided);
}
MPI_Comm_size(MPI_COMM_WORLD, &Num_procs);
/* set final equal to highest rank, because it computes verification value */
final = Num_procs-1;
/*********************************************************************
** process, test and broadcast input parameter
*********************************************************************/
if (my_ID == root){
if (argc != 5){
printf("Usage: %s <#threads> <#iterations> <1st array dimension> <2nd array dimension>\n",
*argv);
error = 1;
goto ENDOFTESTS;
}
/* Take number of threads to request from command line */
nthread = atoi(*++argv);
if ((nthread < 1) || (nthread > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread);
error = 1;
goto ENDOFTESTS;
}
iterations = atoi(*++argv);
if (iterations < 1){
printf("ERROR: iterations must be >= 1 : %d \n",iterations);
error = 1;
goto ENDOFTESTS;
}
m = atol(*++argv);
n = atol(*++argv);
if (m < 1 || n < 1){
printf("ERROR: grid dimensions must be positive: %ld, %ld \n", m, n);
error = 1;
goto ENDOFTESTS;
}
if (m<Num_procs) {
printf("ERROR: First grid dimension %ld smaller than number of ranks %d\n",
m, Num_procs);
error = 1;
goto ENDOFTESTS;
}
ENDOFTESTS:;
}
bail_out(error);
/* Broadcast benchmark data to all ranks */
MPI_Bcast(&m, 1, MPI_LONG, root, MPI_COMM_WORLD);
MPI_Bcast(&n, 1, MPI_LONG, root, MPI_COMM_WORLD);
MPI_Bcast(&iterations, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&nthread, 1, MPI_INT, root, MPI_COMM_WORLD);
omp_set_num_threads(nthread);
if (my_ID == root) {
printf("Number of ranks = %i\n",Num_procs);
printf("Number of threads = %d\n", omp_get_max_threads());
printf("Grid sizes = %ld, %ld\n", m, n);
printf("Number of iterations = %d\n", iterations);
#if SYNCHRONOUS
printf("Handshake between neighbor threads\n");
#else
printf("No handshake between neighbor threads\n");
#endif
}
int leftover;
segment_size = m/Num_procs;
leftover = m%Num_procs;
if (my_ID < leftover) {
start = (segment_size+1)* my_ID;
end = start + segment_size;
}
else {
start = (segment_size+1) * leftover + segment_size * (my_ID-leftover);
end = start + segment_size -1;
}
/* now set segment_size to the value needed by the calling rank */
segment_size = end-start+1;
/* total_length takes into account one ghost cell on left side of segment */
total_length = ((end-start+1)+1)*n;
vector = (double *) prk_malloc(sizeof(double)*total_length);
if (vector == NULL) {
printf("Could not allocate space for grid slice of %ld by %ld points",
segment_size, n);
printf(" on rank %d\n", my_ID);
error = 1;
}
bail_out(error);
/* now divide the rank's grid slice among the threads */
tstart = (long *) prk_malloc(3*nthread*sizeof(long));
if (!tstart) {
printf("ERROR: Could not allocate space for array of slice boundaries\n");
exit(EXIT_FAILURE);
}
tend = tstart + nthread;
tsegment_size = tend + nthread;
tstart[0] = start;
for (TID=0; TID<nthread; TID++) {
tsegment_size[TID] = segment_size/nthread;
if (TID < (segment_size%nthread)) tsegment_size[TID]++;
if (TID>0) tstart[TID] = tend[TID-1]+1;
tend[TID] = tstart[TID]+tsegment_size[TID]-1;
}
flag = (int *) prk_malloc(sizeof(int)*nthread*LINEWORDS*n);
if (!flag) {
printf("ERROR: Could not allocate space for synchronization flags\n");
exit(EXIT_FAILURE);
}
#pragma omp parallel private(i, j, iter, true, false)
{
int TID = omp_get_thread_num();
/* clear the array */
for (j=0; j<n; j++)
for (i=tstart[TID]-1; i<=tend[TID]; i++) {
ARRAY(i-start,j) = 0.0;
}
/* set boundary values (bottom and left side of grid */
if (my_ID==0 && TID==0) for (j=0; j<n; j++) ARRAY(tstart[TID]-start,j) = (double) j;
for (i=tstart[TID]-1; i<=tend[TID]; i++) {
ARRAY(i-start,0) = (double) i;
}
#pragma omp barrier
if (TID==0) {
/* redefine start and end for calling rank to reflect local indices */
if (my_ID==0) start = 1;
else start = 0;
end = segment_size-1;
/* redefine tstart and tend for calling thread to reflect local indices */
tstart[0] = start;
tend[0] = tsegment_size[0]-1;
for (ID=1; ID<nthread; ID++) {
tstart[ID] = tend[ID-1]+1;
tend[ID] = tstart[ID]+tsegment_size[ID]-1;
}
}
/* set flags to zero to indicate no data is available yet */
true = 1; false = !true;
for (j=0; j<n; j++) flag(TID,j) = 0;
/* need barrier after setting flags, to make sure each is visible to all threads
and to synchronize before iterations start */
#pragma omp barrier
for (iter=0; iter<=iterations; iter++) {
#if !SYNCHRONOUS
/* true and false toggle each iteration */
true = (iter+1)%2; false = !true;
#endif
/* start timer after a warmup iteration */
if (iter == 1) {
#pragma omp barrier
if (TID==0) {
/* No critical required here because only called from master thread */
MPI_Barrier(MPI_COMM_WORLD);
local_pipeline_time = wtime();
}
}
if ((Num_procs==1) && (TID==0)) { /* first thread waits for corner value */
while (flag(0,0) == true) {
#pragma omp flush
}
#if SYNCHRONOUS
flag(0,0)= true;
#pragma omp flush
#endif
}
/* execute pipeline algorithm for grid lines 1 through n-1 (skip bottom line) */
for (j=1; j<n; j++) {
/* if I am not at the left boundary, I need to wait for my left neighbor to
send data */
if (TID==0){
if (my_ID > 0) {
MPI_Recv(&(ARRAY(start-1,j)), 1, MPI_DOUBLE, my_ID-1, j,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
else {
while (flag(TID-1,j) == false) {
#pragma omp flush
}
#if SYNCHRONOUS
flag(TID-1,j)= false;
#pragma omp flush
#endif
}
for (i=tstart[TID]; i<= tend[TID]; i++) {
ARRAY(i,j) = ARRAY(i-1,j) + ARRAY(i,j-1) - ARRAY(i-1,j-1);
}
/* if not on right boundary, signal right neighbor it has new data */
if (TID < nthread-1) {
#if SYNCHRONOUS
while (flag(TID,j) == true) {
#pragma omp flush
}
#endif
flag(TID,j) = true;
#pragma omp flush
}
else { /* if not on the right boundary, send data to my right neighbor */
if (my_ID < Num_procs-1) {
MPI_Send(&(ARRAY(end,j)), 1, MPI_DOUBLE, my_ID+1, j, MPI_COMM_WORLD);
}
}
}
/* copy top right corner value to bottom left corner to create dependency */
if (Num_procs>1) {
if (TID==nthread-1 && my_ID==final) {
corner_val = -ARRAY(end,n-1);
MPI_Send(&corner_val,1,MPI_DOUBLE,root,888,MPI_COMM_WORLD);
}
if (TID==0 && my_ID==root) {
MPI_Recv(&(ARRAY(0,0)),1,MPI_DOUBLE,final,888,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
}
}
else {
if (TID==nthread-1) { /* if on right boundary, copy top right corner value
to bottom left corner to create dependency and signal completion */
ARRAY(0,0) = -ARRAY(m-1,n-1);
#if SYNCHRONOUS
while (flag(0,0) == false) {
#pragma omp flush
}
flag(0,0) = false;
#else
#pragma omp flush
flag(0,0) = true;
#endif
#pragma omp flush
}
}
} /* end of iterations */
} /* end of parallel section */
local_pipeline_time = wtime() - local_pipeline_time;
MPI_Reduce(&local_pipeline_time, &pipeline_time, 1, MPI_DOUBLE, MPI_MAX, final,
MPI_COMM_WORLD);
/*******************************************************************************
** Analyze and output results.
********************************************************************************/
/* verify correctness, using top right value */
corner_val = (double) ((iterations+1)*(m+n-2));
if (my_ID == final) {
if (fabs(ARRAY(end,n-1)-corner_val)/corner_val >= epsilon) {
printf("ERROR: checksum %lf does not match verification value %lf\n",
ARRAY(end,n-1), corner_val);
error = 1;
}
}
bail_out(error);
if (my_ID == final) {
avgtime = pipeline_time/iterations;
#if VERBOSE
printf("Solution validates; verification value = %lf\n", corner_val);
printf("Point-to-point synchronizations/s: %lf\n",
((float)((n-1)*(Num_procs-1)))/(avgtime));
#else
printf("Solution validates\n");
#endif
printf("Rate (MFlops/s): %lf Avg time (s): %lf\n",
1.0E-06 * 2 * ((double)((m-1)*(n-1)))/avgtime, avgtime);
}
MPI_Finalize();
exit(EXIT_SUCCESS);
} /* end of main */
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
  Define declarations.
*/
/* Upper bound on the number of channels a single PSD layer may declare. */
#define MaxPSDChannels 56
/* Round a byte count up to the next even value; PSD pads odd-length data
   to 2-byte boundaries. */
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
  Enumerated declarations.
*/
/*
  PSD on-disk compression methods (the "Compression" field of image and
  channel data).
*/
typedef enum
{
  Raw = 0,
  RLE = 1,                    /* PackBits run-length encoding */
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3       /* zlib with per-row delta prediction */
} PSDCompressionType;
/*
  PSD color modes (the "Mode" field of the file header).
*/
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
  One channel record of a layer: its type id and the byte size of its
  stored data.  Negative type ids denote alpha/mask channels (see
  SetPSDPixel).
*/
typedef struct _ChannelInfo
{
  short
    type;

  size_t
    size;
} ChannelInfo;
/*
  A layer's opacity-mask: the mask image, its placement, the background
  value outside the mask, and the PSD mask flags byte.
*/
typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;
/*
  Everything read from one PSD layer record.
*/
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* per-channel type/size records */

  char
    blendkey[4];                   /* 4-character blend-mode key */

  Image
    *image;                        /* decoded layer pixels */

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;                          /* layer placement on the canvas */

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],                     /* Pascal-style layer name, NUL padded */
    visible;

  unsigned short
    channels;                      /* number of valid channel_info entries */

  StringInfo
    *info;                         /* additional (raw) layer information */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD file begins with the 4-byte signature "8BPS"; anything shorter
    than the signature cannot be a PSD file.
  */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  /*
    Translate a MagickCore composite operator into the 4-character blend-mode
    key used by PSD layer records.  The key is stored byte-reversed when the
    image endian is LSB.  Unknown operators (and OverCompositeOp) map to the
    "normal" blend mode.
  */
  static const struct
  {
    CompositeOperator
      compose;

    const char
      *reversed,  /* key as written for LSB-endian images */
      *key;       /* key as written for MSB-endian images */
  } blend_modes[] =
  {
    { ColorBurnCompositeOp,   "vidi", "idiv" },
    { ColorDodgeCompositeOp,  " vid", "div " },
    { ColorizeCompositeOp,    "rloc", "colr" },
    { DarkenCompositeOp,      "krad", "dark" },
    { DifferenceCompositeOp,  "ffid", "diff" },
    { DissolveCompositeOp,    "ssid", "diss" },
    { ExclusionCompositeOp,   "dums", "smud" },
    { HardLightCompositeOp,   "tiLh", "hLit" },
    { HardMixCompositeOp,     "xiMh", "hMix" },
    { HueCompositeOp,         " euh", "hue " },
    { LightenCompositeOp,     "etil", "lite" },
    { LinearBurnCompositeOp,  "nrbl", "lbrn" },
    { LinearDodgeCompositeOp, "gddl", "lddg" },
    { LinearLightCompositeOp, "tiLl", "lLit" },
    { LuminizeCompositeOp,    " mul", "lum " },
    { MultiplyCompositeOp,    " lum", "mul " },
    { OverlayCompositeOp,     "revo", "over" },
    { PinLightCompositeOp,    "tiLp", "pLit" },
    { SaturateCompositeOp,    " tas", "sat " },
    { ScreenCompositeOp,      "nrcs", "scrn" },
    { SoftLightCompositeOp,   "tiLs", "sLit" },
    { VividLightCompositeOp,  "tiLv", "vLit" }
  };

  size_t
    i;

  for (i=0; i < (sizeof(blend_modes)/sizeof(blend_modes[0])); i++)
    if (image->compose == blend_modes[i].compose)
      return(image->endian == LSBEndian ? blend_modes[i].reversed :
        blend_modes[i].key);
  return(image->endian == LSBEndian ? "mron" : "norm");
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Undo Photoshop's pre-blending of semi-transparent pixels against white
    (see comment above).  Only applies to blended sRGB images; skipped when
    the 'psd:alpha-unblend' option is explicitly set to false.
  */
  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A prior row's failure short-circuits the remaining iterations. */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      /* gamma is the normalized alpha; fully transparent or fully opaque
         pixels need no correction. */
      gamma=QuantumScale*GetPixelAlpha(image, q);
      if (gamma != 0.0 && gamma != 1.0)
        {
          /* Invert "c' = c*gamma + white*(1-gamma)" for every non-alpha
             channel. */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Map the PSD on-disk compression id to the MagickCore equivalent; both
    zip variants collapse to ZipCompression, everything unrecognized to
    NoCompression.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Fold a layer's global opacity into the per-pixel alpha channel
    (revert == MagickFalse), or divide it back out (revert == MagickTrue)
    so the original alpha can be recovered when writing.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  /* A fully opaque layer leaves the pixels untouched. */
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        /* alpha' = alpha * opacity (normalized) */
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        /* inverse: alpha = alpha' / opacity; guarded against divide-by-0 */
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  /*
    Multiply the image's alpha channel by a layer mask (revert ==
    MagickFalse), or divide it back out (revert == MagickTrue).  The mask
    may cover only part of the layer, so it is first composited onto a
    full-size canvas filled with the mask's background value.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  /* Fill the canvas with the background level before pasting the mask. */
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  /* Position the mask using the page offsets relative to the layer. */
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        /* alpha' = alpha * mask intensity (normalized) */
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        /* inverse operation; guarded against divide-by-zero */
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  /*
    Stash the layer's opacity mask image in the image registry under a
    random key and record that key as the "psd:opacity-mask" artifact on
    the layer image so the writer can restore the mask later.  The mask
    background value is appended to the key.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    FIX: request 8 random characters plus the terminator.  The previous
    code asked GetRandomKey() for only 2+1 bytes yet wrote key[8] and
    key[9] below, a heap buffer overflow.
  */
  key_info=GetRandomKey(random_info,8+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;  /* encode mask background */
  key[9]='\0';
  /* Translate the mask page offsets to canvas coordinates. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/*
  Decode one PackBits (RLE) compressed scanline into `pixels`, expanding
  sub-byte depths (1/2/4 bits) into one output byte per sample.  Returns
  the number of output bytes produced; an early return (short count)
  signals truncated or malformed input to the caller.

  NOTE(review): for 1-bit images the caller (ReadPSDChannelRLE) passes the
  sentinel depth 123456 so bytes are copied raw here (default case) and
  bit expansion happens later in ReadPSDChannelPixels.
*/
/* Bail out (returning bytes decoded so far) when input is exhausted. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

/* Bail out when the next write would overflow the output buffer. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  register ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;  /* compressed bytes remaining */

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    /* PackBits control byte: <128 literal run, 128 no-op, >128 repeat. */
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;
    if (length > 128)
      {
        /* Repeat run: the next byte is replicated (257-length) times. */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* Expand 8 bits to 8 bytes; set bits map to 0 (ink on). */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              /* Expand 4 2-bit samples to 4 bytes. */
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              /* Expand 2 nibbles to 2 bytes. */
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              /* 8-bit (or sentinel) depth: copy the byte unchanged. */
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /* Literal run: the next (length+1) bytes are copied verbatim. */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  /*
    Release every per-layer resource (layer image, mask image, extra info)
    before freeing the layer array itself; always returns NULL.
  */
  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer = layer_info+j;

    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
  /*
    Bytes per stored sample: colormapped images with more than 256 entries
    need 16-bit indexes; otherwise the size follows the image depth
    (float/32-bit -> 4, 16-bit -> 2, else 1).
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /* Version-1 (PSD) files store sizes as 32-bit values; PSB uses 64-bit. */
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  /* 1-bit images pack 8 pixels per byte: round columns up to whole bytes. */
  samples=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(samples*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name for a PSD color mode; used for logging.
  */
  static const struct
  {
    PSDImageType
      mode;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  size_t
    i;

  for (i=0; i < (sizeof(modes)/sizeof(modes[0])); i++)
    if (modes[i].mode == type)
      return modes[i].name;
  return "unknown";
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  /*
    Negate every color channel while leaving alpha untouched, then restore
    the caller's channel mask.
  */
  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length,ExceptionInfo *exception)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  /*
    Walk the "8BIM" image resource blocks, capture the whole section as an
    8bim profile, and extract the resolution (0x03ed) and merged-image
    (0x0421) resources along the way.  Returns NULL when the section is too
    short or the profile cannot be allocated.
  */
  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  /* FIX: BlobToStringInfo() can fail; dereferencing NULL crashed here. */
  if (profile == (StringInfo *) NULL)
    return((StringInfo *) NULL);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    /* Each resource starts with the "8BIM" signature. */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal-style resource name, padded to an even byte count. */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* Reject resources whose payload would run outside the section. */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];

        unsigned short
          resolution;

        /*
          Resolution info: x-resolution, 3 reserved shorts, y-resolution,
          3 reserved shorts (16 bytes total).
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: byte 4 flags whether a merged image is present. */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even length. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  /*
    Translate a 4-character PSD blend-mode key (MSB byte order) into the
    MagickCore composite operator; unknown or missing keys map to
    OverCompositeOp ("normal").
  */
  static const struct
  {
    const char
      *key;

    CompositeOperator
      compose;
  } blend_modes[] =
  {
    { "norm", OverCompositeOp },
    { "mul ", MultiplyCompositeOp },
    { "diss", DissolveCompositeOp },
    { "diff", DifferenceCompositeOp },
    { "dark", DarkenCompositeOp },
    { "lite", LightenCompositeOp },
    { "hue ", HueCompositeOp },
    { "sat ", SaturateCompositeOp },
    { "colr", ColorizeCompositeOp },
    { "lum ", LuminizeCompositeOp },
    { "scrn", ScreenCompositeOp },
    { "over", OverlayCompositeOp },
    { "hLit", HardLightCompositeOp },
    { "sLit", SoftLightCompositeOp },
    { "smud", ExclusionCompositeOp },
    { "div ", ColorDodgeCompositeOp },
    { "idiv", ColorBurnCompositeOp },
    { "lbrn", LinearBurnCompositeOp },
    { "lddg", LinearDodgeCompositeOp },
    { "lLit", LinearLightCompositeOp },
    { "vLit", VividLightCompositeOp },
    { "pLit", PinLightCompositeOp },
    { "hMix", HardMixCompositeOp }
  };

  size_t
    i;

  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  for (i=0; i < (sizeof(blend_modes)/sizeof(blend_modes[0])); i++)
    if (LocaleNCompare(mode,blend_modes[i].key,4) == 0)
      return(blend_modes[i].compose);
  return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  /*
    Reverse the bytes of a PSD key string in place; big-endian input is
    already in file order so nothing to do.
  */
  if (image->endian == MSBEndian)
    return;
  for (q=p+length-1; p < q; p++, q--)
  {
    char
      swap;

    swap=*p;
    *p=*q;
    *q=swap;
  }
}
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  /*
    Store one decoded sample into the pixel at q.  `type` is the PSD
    channel id: non-negative ids index the color channels, negative ids
    are alpha (-1) or layer-mask related (-2/-3/-4, which alias the color
    channels here).
  */
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      /* Channel 0 carries the colormap index (8- or 16-bit). */
      if (type == 0)
        {
          if (packet_size == 1)
            SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
          else
            SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
        }
      color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
        (ssize_t) GetPixelIndex(image,q),exception);
      /* Any further channel is treated as the colormap entry's alpha. */
      if ((type == 0) && (channels > 1))
        return;
      else
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* Channel 3 is black for CMYK, otherwise alpha (if present). */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* For RGB-compatible images a 5th channel is ignored (spot color
         or similar); for CMYK it is the alpha channel. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  /*
    Copy one decoded scanline (raw bytes in `pixels`) into row `row` of the
    channel identified by `type`.  Samples may be 8-, 16- or 32-bit
    (big-endian); 1-bit images arrive packed 8 pixels per byte and are
    expanded here.
  */
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 32-bit channels hold normalized big-endian floats. */
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /* 1-bit: expand up to 8 packed bits; a set bit means black (0). */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* Compensate for the outer loop's x++ unless the row ended. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    y;

  unsigned char
    *scanline;

  /*
    Read an uncompressed channel: one blob read per row, handed straight
    to ReadPSDChannelPixels().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  scanline=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*scanline));
  if (scanline == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(scanline,0,row_size*sizeof(*scanline));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* A short read means the blob ended prematurely. */
    if (ReadBlob(image,row_size,scanline) != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,scanline,exception);
    if (status == MagickFalse)
      break;
  }
  scanline=(unsigned char *) RelinquishMagickMemory(scanline);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  /*
    Read the per-row compressed byte counts that precede RLE channel data:
    16-bit values in PSD (version 1), 32-bit in PSB.  Returns NULL on
    allocation failure.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return sizes;
  for (i=0; i < (ssize_t) size; i++)
  {
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  /*
    Read an RLE (PackBits) compressed channel: `sizes` holds the
    compressed byte count of every row.  Each row is read and decoded
    through DecodePSDPixels(), then stored via ReadPSDChannelPixels().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compressed buffer to the largest row and sanity-check it. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  /* FIX: sizeof referent was *pixels; harmless (both unsigned char) but
     misleading — allocate in terms of the buffer being created. */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* Depth 123456 is a sentinel: 1-bit rows are decoded as raw bytes
       here and expanded to pixels later in ReadPSDChannelPixels(). */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  ReadPSDChannelZip() decodes one ZIP-compressed layer channel: the compact
  blob is read from the stream, inflated into a full-resolution pixel buffer,
  optionally delta-decoded (ZipWithPrediction), and then handed row by row to
  ReadPSDChannelPixels().  Returns MagickTrue on success; both scratch
  buffers are released on every exit path.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /*
    A compact size larger than the remaining blob cannot possibly be read.
  */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    count is the size in bytes of the fully decompressed channel.
  */
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  /*
    Inflate the compact blob; stop at Z_STREAM_END or when the output buffer
    is full.  Any other zlib status aborts the decode.
  */
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo horizontal delta prediction: each packet stores the difference
        from its left neighbor.  For 2-byte packets the high/low bytes are
        combined with a carry from the low-byte sum; 4-byte (32-bit float)
        prediction is not implemented (see the TODO below).
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
/*
          else if (packet_size == 4)
             {
               TODO: Figure out what to do there.
             }
*/
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  /*
    Push each decoded row into the image pixel cache.
  */
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  ReadPSDChannel() reads one channel of a layer, dispatching on the channel's
  compression type.  Channel types below -1 carry layer-mask data and are
  decoded into a separate grayscale mask image that is stored on the layer.
  After decoding, the stream is repositioned to the channel's declared end so
  a short or failed decode cannot desynchronize the parser.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /*
            Skip the channel payload; the 2-byte compression word was already
            consumed by the caller, hence size-2.
          */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  /*
    Remember the channel start so we can seek to its declared end afterwards.
  */
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      /* RLE data is preceded by one packed byte count per row. */
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Re-sync on the declared channel size regardless of decode outcome. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  ReadPSDLayer() prepares a freshly allocated layer image (compose operator,
  positional artifacts, label), reads every channel of the layer, then applies
  the layer opacity and the optional layer mask.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* A hidden layer must not contribute when the list is later flattened. */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Every channel carries its own 2-byte compression word. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    /* TODO: Remove this when we figure out how to support this */
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* Channel type -1 is the layer's alpha channel. */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  CheckPSDChannels() verifies that a layer supplies every color channel the
  file's mode requires.  A bit is set for each required channel and cleared
  as the matching channel type is found; an extra alpha channel is allowed
  when the layer carries one more channel than the minimum.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    switch (layer_info->channel_info[i].type)
    {
      case -1:
        required|=AlphaChannel;
        break;
      case 0:
        required&=~RedChannel;
        break;
      case 1:
        required&=~GreenChannel;
        break;
      case 2:
        required&=~BlueChannel;
        break;
      case 3:
        required&=~BlackChannel;
        break;
      default:
        /* Mask channels (< -1) and unknown positive types are ignored. */
        break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  AttachPSDLayers() compacts the layer array (dropping entries whose image
  could not be allocated), links the surviving layer images into a
  doubly-linked image list hanging off the base image, and releases the
  layer-info array.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;

  ssize_t
    valid;

  /* Compact in place: keep only layers that own an image. */
  valid=0;
  for (i=0; i < number_layers; i++)
    if (layer_info[i].image != (Image *) NULL)
      layer_info[valid++]=layer_info[i];
  number_layers=valid;
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /* Chain the layer images together and propagate each layer's page. */
  for (i=0; i < number_layers; i++)
  {
    if (i != 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i != (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  PSDSkipImage() reports whether the frame at the given index falls outside
  the user's requested scene range.  Nothing is skipped when the file has no
  merged image or when no scene range was requested.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= image_info->scene+image_info->number_scenes-1))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  ReadPSDLayersInternal() parses the "layer and mask information" section:
  the layer count, each layer's record (geometry, channels, blend key,
  opacity, flags, mask info, blending ranges, name, additional info), then
  allocates a layer image per record and decodes the per-layer channel data.
  On success the decoded layers are attached to the base image's list.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      if (count == 4)
        ReversePSDString(image,type,(size_t) count);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          /*
            16- and 32-bit files keep their real layer data in an additional
            "Lr16"/"Lr32" information block.
          */
          count=ReadBlob(image,4,(unsigned char *) type);
          if (count == 4)
            ReversePSDString(image,type,4);
          if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
              (LocaleNCompare(type,"Lr32",4) == 0)))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  if (size == 0)
    return(MagickTrue);
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: parse every layer record (no pixel data yet).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) type);
    if (count == 4)
      ReversePSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    ReversePSDString(image,layer_info[i].blendkey,4);
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 means the layer is hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        /* combined_length tracks the bytes consumed of the "size" total,
           including each sub-block's own length field. */
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* Flag bit 0x01: mask position is relative to the layer. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
              NOTE(review): the 18 bytes read above are subtracted from an
              unsigned length; if 0 < length < 18 this wraps to a huge byte
              count -- confirm a guard exists (or add one) upstream.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* The Pascal-style name is padded to a multiple of 4 bytes. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Whatever remains of "size" is the layer's additional-information
          blob; keep it verbatim so it can be re-attached as a profile.
        */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: decode pixel data, discarding channels of skipped/empty layers.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
/*
  ReadPSDLayers() is the policy-gated public entry point for layer parsing:
  when the security policy denies coder read rights for "PSD" the layers are
  silently skipped (MagickTrue), otherwise parsing is delegated to
  ReadPSDLayersInternal() with skip_layers disabled.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  ReadPSDMergedImage() reads the flattened composite ("merged") image that
  follows the layer section.  Only Raw and RLE compression are handled here;
  each channel is decoded in file order, then CMYK negation and alpha-blend
  correction are applied.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /* A non-zero scene selection starting past frame 0 never needs the
     composite. */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by one packed row size per row per channel. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* In a two-channel (gray+alpha) file, channel 1 is alpha (-1). */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  ReadPSDImage() is the coder entry point: it validates the PSD/PSB header,
  reads the colormap, image resource blocks, layer/mask section, and finally
  the precombined (merged) image, returning the resulting image list or NULL
  on failure.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  Version 1 is PSD, version 2 is PSB (large document).
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* The PSD (v1) format caps dimensions at 30000 pixels. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Derive the colorspace and the minimum channel count from the mode.
  */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,(size_t) (psd_info.depth < 16 ?
              256 : 65536),exception);
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
      }
    else
      if (psd_info.mode == IndexedMode)
        psd_info.min_channels=1;
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data;  the format of this data is undocumented.
            32 bits per pixel;  the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap: all red bytes, then all green bytes,
            then all blue bytes.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length,
        exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /* NOTE(review): an 8-byte section is re-read as two 32-bit lengths;
         the second read's value replaces the first -- confirm against the
         PSD specification's global-layer-mask layout. */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  /* When only the first scene is wanted and a merged image exists, skip
     decoding the individual layers entirely. */
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  /* No merged image and a single frame: fall back to re-reading the layer
     section without skipping. */
  if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (psd_info.has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* Synthesize the missing composite by flattening the layers. */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* Attach the resource-block profile to every frame that was kept. */
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          (void) SetImageProfile(next,GetStringInfoName(profile),profile,
            exception);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Emit a row-offset value in the version-appropriate width: PSD
    (version 1) stores 16-bit offsets, PSB (version 2) stores 32-bit.
  */
  return((psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) offset) :
    WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Back-patch a previously reserved offset slot: remember the current blob
    position, seek to the slot, write the value in the version-appropriate
    width (16-bit for PSD, 32-bit for PSB), then seek back.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a section-size field: 32-bit for PSD (version 1), 64-bit for PSB.
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Back-patch a size field reserved earlier with SetPSDSize(): seek to the
    slot, rewrite it with the final value, and restore the blob position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.

    Packbits (RLE) output is a sequence of records: a header byte n in
    0..127 means "copy the next n+1 literal bytes"; 129..255 means "repeat
    the next byte 257-n times"; 128 terminates the data.  Returns the
    number of bytes stored in compact_pixels; the caller supplies a
    worst-case sized buffer (see AcquireCompactPixels).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch space for one literal run: length byte + up to 127 literals. */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the input bytes still to encode. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: emit as a 1-byte literal run. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: a 2-byte literal run. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three equal trailing bytes: packed run of 3. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* Otherwise a 3-byte literal run. */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run while data matches, capped at 127 (the
               header-byte limit). */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect literals until a run of three equal bytes begins, the
           3-byte tail is reached, or 127 literals were gathered. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        /* packbits[0] holds the literal-run header (count-1). */
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    i,
    y;

  /*
    Write the 2-byte compression marker that precedes pixel data.  For RLE
    we also reserve one offset slot per channel row; those placeholders are
    back-patched via WritePSDOffset() once each row's compressed length is
    known.  Returns the number of bytes written.
  */
  if (compression == RLECompression)
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobShort(image,Raw));
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  /*
    Write one channel of next_image's pixel data to the blob and return the
    number of bytes written (0 signals failure).  When 'separate' is set
    (layer channels) the channel carries its own 2-byte compression marker
    and, for RLE, its own row-offset table.
  */
  count=0;
  if (separate != MagickFalse)
    {
      /* Skip past the 2-byte compression marker; size_offset then points
         at the first RLE row-length slot of this channel. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;  /* clamp: at most 16 bits per sample here */
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* -quality 1..9 selects the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);  /* PSD bitmap mode stores 0 as white */
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Back-patch this row's compressed length into the offset table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;  /* drain the deflate stream on the last row */
        do {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *compact_pixels;

  /*
    Allocate a worst-case RLE scratch buffer for one scanline; 16-bit
    samples need two bytes per packet.  Returns NULL (with an exception
    recorded) when the allocation fails.
  */
  bytes_per_packet=(image->depth > 8UL) ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
CompressionType
compression;
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
count,
length,
offset_length;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
compression=next_image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
if ((next_image->storage_class != PseudoClass) ||
(IsImageGray(next_image) != MagickFalse))
{
if (IsImageGray(next_image) == MagickFalse)
channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
3);
if (next_image->alpha_trait != UndefinedPixelTrait)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,
(ssize_t) channels);
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if ((next_image->storage_class == PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsImageGray(next_image) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->alpha_trait != UndefinedPixelTrait)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
if (separate != MagickFalse)
{
const char
*property;
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
exception);
if (mask != (Image *) NULL)
{
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(mask,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
exception);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    written;

  ssize_t
    i;

  /*
    Write a Pascal string (length byte followed by up to 255 characters)
    and pad the total, length byte included, to a multiple of 'padding'
    with NUL bytes.  Returns the number of bytes written.
  */
  written=strlen(value);
  if (written > 255UL)
    written=255UL;  /* the length byte caps the string at 255 */
  count=0;
  if (written == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) written);
      count+=WriteBlob(image,written,(const unsigned char *) value);
    }
  written++;  /* account for the length byte itself */
  if ((written % padding) != 0)
    for (i=0; i < (ssize_t) (padding-(written % padding)); i++)
      count+=WriteBlobByte(image,0);
  return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  /*
    Emit the 8BIM resolution resource (id 0x03ED).  Resolutions are stored
    as 16.16 fixed-point pixels-per-inch, so per-centimeter values are
    scaled by 2.54 first; 'units' records the display unit (1 = inch,
    2 = centimeter).
  */
  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);  /* resource id: resolution info */
  (void) WriteBlobMSBShort(image,0);  /* empty, padded Pascal name */
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): 0.5 was already added above and is added again in the
     cast below, so the value is effectively rounded twice — confirm this
     matches what the PSD reader expects. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  /*
    Write one channel-info record: the channel id followed by a zero size
    placeholder that is back-patched later via WritePSDSize().
  */
  count=(size_t) WriteBlobShort(image,(unsigned short) channel);
  return(count+SetPSDSize(psd_info,image,0));
}
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Strip the embedded ICC profile resource (id 0x040F) from a copy of the
    8BIM resource block so the profile is not written twice;  WritePSDImage
    emits the "icc" profile as its own resource.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too short to hold even one resource header */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;  /* start of the current resource record */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Parse the resource header: signature, id, 2-byte name, data size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Whole resource: data padded to an even count, plus the
           12-byte header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* Slide the remaining resources down over this one. */
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)  /* resources are padded to even lengths */
      p++;
  }
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  /*
    Strip the resolution resource (id 0x03ED) from a copy of the 8BIM
    profile; WriteResolutionResourceBlock() writes a fresh one derived from
    the image's current resolution.
  */
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too short to hold even one resource header */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;  /* start of the current resource record */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Parse the resource header: signature, id, 2-byte name, data size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* data size rounded up to an even byte count */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Close the gap over the 12-byte header plus padded data. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)  /* resources are padded to even lengths */
      p++;
  }
}
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  /*
    Filter the layer's "psd:additional-info" profile according to the
    psd:additional-info image option: "all" keeps everything, "selective"
    keeps only whitelisted records (compacting the profile in place), and
    any other value drops the profile.  Returns the profile to write, or
    NULL when nothing should be written.
  */
  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;  /* from here on, 'length' counts the bytes kept */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* Big-endian 32-bit record size. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated record: give up */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Unlisted record: compact it away by sliding the tail down. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  /* NOTE(review): 'info' points at the same data just truncated through
     'profile'; SetImageProfile presumably clones it — confirm. */
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  /*
    Write the PSD layer-info section: a size field, a (possibly negative)
    layer count, one record per layer, then every layer's channel data.
    On return *layers_size (when non-NULL) receives the unpadded section
    size.
  */
  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;  /* single image: it is its own only layer */
  size=0;
  /* Reserve the section-size field; back-patched at the end. */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count signals that the first alpha channel holds the
     merged-result transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        /* NOTE(review): a 9-character registry key appears to select a
           white (255) default mask color — confirm against the reader. */
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-size records start; the real
       sizes are patched in after the channel data is written. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);  /* -1: transparency */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);  /* -2: user layer mask */
    /* Blend-mode signature, byte-swapped on little-endian blobs. */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);  /* fully opaque by default */
    size+=WriteBlobByte(image,0);  /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unnamed layers get a synthetic "L<index>" name. */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: Pascal name padded to 4 bytes + mask record and
       blending-ranges fields (8) + optional 20-byte mask data +
       additional-info block. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);  /* no layer-mask record */
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask bounds are stored in canvas coordinates. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);  /* mask record size */
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));  /* mask flags */
        size+=WriteBlobMSBShort(image,0);  /* padding */
      }
    size+=WriteBlobLong(image,0);  /* layer blending ranges: empty */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The stored section size is rounded up to an even byte count. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Honor the coder security policy: silently skip writing layers when PSD
    write rights are not granted.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.

    Overall layout written here: file header, color-mode data (colormap),
    image resources (resolution, 8BIM, ICC), the layer section, then the
    merged composite image.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* NOTE(review): packet_size is computed here but not referenced again in
     this function — confirm whether it can be removed. */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* Use the large-document header (PSB, version 2) when requested or when
     the canvas exceeds the PSD dimension limit. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          /* Anything not explicitly CMYK is written as (indexed) RGB. */
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);  /* no color-mode data */
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);  /* 256 entries per channel */
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* base size: the 0x03ED resolution resource (28 bytes) */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a copy: the resolution (and, when written separately, the
         ICC) resources are replaced by freshly generated ones. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile resource (id 0x040F), padded to an even length. */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* Layer and mask information section; the size field is reserved
         here and patched once the layers are written. */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* The merged composite is never zip-compressed; fall back to RLE. */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
           exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
driver.c | #include "driver.h"
#include "calculate.h"
#include "collections.h"
#include "config.h"
#include "jump.h"
#include "malloc.h"
#include "print.h"
#include "random.h"
#include "read.h"
#include "util.h"
// -------- Input --------
// Read the raw input described by the plan, then run the two
// post-processing passes that derive per-type word counts and the
// collection structures used later by the permutation test.
void
process_input(input_t * restrict pinput, const plan_t * restrict pplan)
{
    read_raw_input(pinput, pplan);
    postprocess_type_word_count(pinput, pplan);
    postprocess_collections(pinput, pplan);
}
// -------- Merging (generic) --------
static void
merge_stat_one(const stat_t * restrict source,
               stat_t * restrict target,
               unsigned elements)
{
    /* Element-wise accumulate source into target.  The two pointers are
     * either both NULL (an absent statistic) or both non-NULL. */
    if (source == NULL) {
        assert(target == NULL);
        return;
    }
    assert(target != NULL);
#pragma omp parallel for
    for (unsigned idx = 0; idx < elements; idx++) {
        target[idx].lower += source[idx].lower;
        target[idx].upper += source[idx].upper;
    }
}
static void
merge_stat(yxstat_t * restrict yxtarget,
           const yxstat_t * restrict yxsource)
{
    /* Accumulate every statistic table of yxsource into yxtarget; the
     * element counts of corresponding tables must agree. */
    for (unsigned slot = 0; slot < NYX; slot++) {
        assert(yxtarget->elements.yx[slot] == yxsource->elements.yx[slot]);
        merge_stat_one(yxsource->yx[slot], yxtarget->yx[slot],
                       yxsource->elements.yx[slot]);
    }
}
// Drain the linked list of per-thread partial results into one merged
// statistic.  Consumes the list: every node is freed, as is every
// statistic merged away; the survivor is returned by value.
static yxstat_t
merge_all(yxstat_t **ppyxstat_head,
          unsigned result_count)
{
    // Convert to vector
    assert(result_count >= 1);
    yxstat_t yxstat_vector[result_count];
    for (unsigned i = 0; i < result_count; i++) {
        yxstat_t *pyxstat = *ppyxstat_head;
        assert(pyxstat != NULL);
        yxstat_vector[i] = *pyxstat;
        yxstat_vector[i].next = NULL;
        *ppyxstat_head = pyxstat->next;
        free(pyxstat);
    }
    assert(*ppyxstat_head == NULL);
    // Merge results
    // Pairwise tree reduction: each pass folds the top `half` entries into
    // the lower ones in parallel and shrinks the live count to b; when the
    // count is odd (a == 1), entry 0 sits the pass out.
    while (result_count > 1) {
        unsigned half = result_count / 2;
        unsigned a = result_count - 2 * half;
        unsigned b = result_count - half;
#pragma omp parallel for
        for (unsigned i = 0; i < half; i++) {
            unsigned from = b + i;
            unsigned to = a + i;
            merge_stat(&yxstat_vector[to], &yxstat_vector[from]);
            free_stat(&yxstat_vector[from]);
        }
        result_count = b;
    }
    return yxstat_vector[0];
}
// -------- Permutation testing --------
static void
summarise_collections(const input_t * restrict pinput,
                      collection_t * restrict pcoll,
                      const algv_t * restrict algv)
{
    /* Summarise each collection independently, in parallel. */
    const unsigned ncol = pinput->collections.ncol;
#pragma omp parallel for
    for (unsigned idx = 0; idx < ncol; idx++) {
        algv->summarise_collection(pinput, pcoll, idx);
    }
}
// Run the permutation test in parallel.  Each OpenMP thread allocates a
// private result block, links it onto *ppyxstat_head (under a critical
// section), then processes a share of this process's generator partitions.
// Returns the number of result blocks pushed (one per thread); the caller
// is responsible for merging and freeing them.
static unsigned
calculate_permtest_parallel(const input_t * restrict pinput,
                            const rng_state_t * restrict rng_state_init,
                            const collection_t * restrict pcoll,
                            yxstat_t **ppyxstat_head,
                            const alg_t * restrict alg,
                            const algv_t * restrict algv)
{
    unsigned result_count = 0;
    // Partition range [gen_from, gen_to) belongs to this process (MPI-style split).
    unsigned gen_from = get_generator(pinput->processes, pinput->id);
    unsigned gen_to = get_generator(pinput->processes, pinput->id + 1);
#pragma omp parallel
    {
        yxstat_t *pyxstat = alloc_stat_uniform(&alg->outputs, pinput->collections.ncol);
#pragma omp critical
        {
            // Publish this thread's private block on the shared list.
            pyxstat->next = *ppyxstat_head;
            *ppyxstat_head = pyxstat;
            result_count++;
        }
        // nowait: threads may leave as soon as their share of parts is done.
#pragma omp for nowait
        for (unsigned part = gen_from; part < gen_to; part++) {
            algv->calculate_permtest(pinput, rng_state_init, pcoll, pyxstat, part);
        }
    }
    return result_count;
}
/* Run the permutation test across all threads, then reduce the per-thread
 * partial statistics into a single result (which the caller must free). */
static yxstat_t
calculate_permtest(const input_t * restrict pinput,
                   const rng_state_t * restrict rng_state_init,
                   const collection_t * restrict pcoll,
                   const alg_t * restrict alg,
                   const algv_t * restrict algv)
{
    yxstat_t *partials = NULL;
    const unsigned nparts =
        calculate_permtest_parallel(pinput, rng_state_init, pcoll,
                                    &partials, alg, algv);
    return merge_all(&partials, nparts);
}
/* Emit the permutation-test results for every statistic axis. */
static void
print_permtest(input_t * restrict pinput,
               const collection_t * restrict pcoll,
               const yxstat_t * restrict pyxstat)
{
    unsigned axis = 0;
    while (axis < NYX) {
        print_permtest_one(pinput, pcoll, pyxstat->yx[axis], axis);
        axis++;
    }
}
// Full permutation-test pipeline for one algorithm: summarise collections,
// run the test, print the results, and release all temporaries.
static void
calculate_and_print_permtest(input_t * restrict pinput,
                             const rng_state_t * restrict rng_state_init,
                             const alg_t * restrict alg,
                             const algv_t * restrict algv)
{
    collection_t * restrict pcoll;
    // Zero-initialised array of collections; presumably aborts on OOM - TODO confirm.
    MYMALLOCZ(pcoll, collection_t, pinput->collections.ncol, COLLECTION_NULL_C);
    summarise_collections(pinput, pcoll, algv);
    yxstat_t yxstat = calculate_permtest(pinput, rng_state_init, pcoll, alg, algv);
    print_permtest(pinput, pcoll, &yxstat);
    free_stat(&yxstat);
    free(pcoll);
    if (pinput->progress) {
        fprintf(stderr, "P");   // progress marker: one 'P' per algorithm
    }
}
// -------- Curves --------
// Parallel curve computation; mirrors calculate_permtest_parallel but
// allocates grid-shaped result blocks.  Each thread pushes one private
// block onto *ppyxstat_head and processes a share of this process's
// generator partitions.  Returns the number of blocks pushed.
static unsigned
calculate_curves_parallel(const input_t * restrict pinput,
                          const rng_state_t * restrict rng_state_init,
                          const grid_t * restrict pgrid,
                          yxstat_t **ppyxstat_head,
                          const alg_t * restrict alg,
                          const algv_t * restrict algv)
{
    unsigned result_count = 0;
    unsigned gen_from = get_generator(pinput->processes, pinput->id);
    unsigned gen_to = get_generator(pinput->processes, pinput->id + 1);
#pragma omp parallel
    {
        yxstat_t *pyxstat = alloc_stat(&alg->outputs, &pgrid->elements);
#pragma omp critical
        {
            // Publish this thread's private block on the shared list.
            pyxstat->next = *ppyxstat_head;
            *ppyxstat_head = pyxstat;
            result_count++;
        }
#pragma omp for nowait
        for (unsigned part = gen_from; part < gen_to; part++) {
            algv->calculate_curves(pinput, rng_state_init, pgrid, pyxstat, part);
        }
    }
    return result_count;
}
/* Launch the parallel curve computation, then reduce the per-thread
 * partial results into one statistic (which the caller must free). */
static yxstat_t
calculate_curves(const input_t * restrict pinput,
                 const rng_state_t * restrict rng_state_init,
                 const grid_t * restrict pgrid,
                 const alg_t * restrict alg,
                 const algv_t * restrict algv)
{
    yxstat_t *partials = NULL;
    const unsigned nparts =
        calculate_curves_parallel(pinput, rng_state_init, pgrid,
                                  &partials, alg, algv);
    return merge_all(&partials, nparts);
}
/* Emit the curve results for every statistic axis. */
static void
print_curves(input_t * restrict pinput, const grid_t * restrict pgrid, const yxstat_t * restrict pyxstat)
{
    unsigned axis = 0;
    while (axis < NYX) {
        print_curves_one(pinput, pgrid, pyxstat->yx[axis], axis);
        axis++;
    }
}
/* Compute the curves for one algorithm, print them, then release the
 * merged statistics. */
static void
calculate_and_print_curves(input_t * restrict pinput,
                           const rng_state_t * restrict rng_state_init,
                           const grid_t * restrict pgrid,
                           const alg_t * restrict alg,
                           const algv_t * restrict algv)
{
    yxstat_t merged = calculate_curves(pinput, rng_state_init, pgrid, alg, algv);
    print_curves(pinput, pgrid, &merged);
    free_stat(&merged);
    if (pinput->progress) {
        fprintf(stderr, "C");   /* progress marker: one 'C' per algorithm */
    }
}
// -------- Driver --------
// Top-level driver: run every requested algorithm's permutation test and/or
// curve computation, then finalise the raw output stream.
// Owns rng_state_init for the duration of the run.
void
execute_all(input_t * restrict pinput, const plan_t * restrict pplan)
{
    // Load the initial RNG state and release the file handle immediately.
    rng_state_t *rng_state_init = rng_state_read(&pinput->rng_state_file);
    myclose(&pinput->rng_state_file);
    print_head(pinput);
    // Select the sparse or dense algorithm vtable once for the whole run.
    const algv_t * restrict algv = pinput->sparse ? ALG_SPARSE : ALG_DENSE;
    if (pplan->requirements & WITH_PERMTEST) {
        for (unsigned i = 0; i < NALG; i++) {
            if (pplan->palg[i]) {
                calculate_and_print_permtest(pinput, rng_state_init, ALG + i, algv + i);
            }
        }
    }
    if (pplan->requirements & WITH_CURVES) {
        grid_t grid = GRID_NULL;
        setup_grid(pinput, pplan, &grid);
        // NOTE(review): grid is never explicitly released here; confirm
        // whether setup_grid allocates resources needing a matching free.
        for (unsigned i = 0; i < NALG; i++) {
            if (pplan->calg[i]) {
                calculate_and_print_curves(pinput, rng_state_init, &grid, ALG + i, algv + i);
            }
        }
    }
    // Header
    myfwrite_uint(&pinput->raw_output_file, CLASS_NONE);
    myclose(&pinput->raw_output_file);
    // free(NULL) is a no-op, so no NULL guard is needed.
    free(rng_state_init);
}
|
ParallelFor.h | /*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include <stdio.h> //printf debugging
#include <algorithm>
// choose threading providers:
#if BT_USE_TBB
#define USE_TBB 1 // use Intel Threading Building Blocks for thread management
#endif
#if BT_USE_PPL
#define USE_PPL 1 // use Microsoft Parallel Patterns Library (installed with Visual Studio 2010 and later)
#endif // BT_USE_PPL
#if BT_USE_OPENMP
#define USE_OPENMP 1 // use OpenMP (also need to change compiler options for OpenMP support)
#endif
#if USE_OPENMP
#include <omp.h>
#endif // #if USE_OPENMP
#if USE_PPL
#include <ppl.h> // if you get a compile error here, check whether your version of Visual Studio includes PPL
// Visual Studio 2010 and later should come with it
#include <concrtrm.h> // for GetProcessorCount()
#endif // #if USE_PPL
#if USE_TBB
#define __TBB_NO_IMPLICIT_LINKAGE 1
#include <tbb/tbb.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
#endif // #if USE_TBB
///
/// TaskManager -- selects one of the compiled-in threading providers
/// (OpenMP, Intel TBB, MS PPL, or none) at runtime and configures its
/// thread count.  Used through a single global instance (gTaskMgr).
///
class TaskManager
{
public:
    enum Api
    {
        apiNone,    // serial fallback
        apiOpenMP,
        apiTbb,
        apiPpl,
        apiCount
    };
    // Human-readable name of an API value (for logging/UI).
    static const char* getApiName( Api api )
    {
        switch ( api )
        {
        case apiNone: return "None";
        case apiOpenMP: return "OpenMP";
        case apiTbb: return "Intel TBB";
        case apiPpl: return "MS PPL";
        default: return "unknown";
        }
    }
    TaskManager()
    {
        m_api = apiNone;
        m_numThreads = 0;   // 0 means "not yet configured"; see init()
#if USE_TBB
        m_tbbSchedulerInit = NULL;
#endif // #if USE_TBB
    }
    Api getApi() const
    {
        return m_api;
    }
    // True when the given API was compiled in (apiNone always is).
    bool isSupported( Api api ) const
    {
#if USE_OPENMP
        if ( api == apiOpenMP )
        {
            return true;
        }
#endif
#if USE_TBB
        if ( api == apiTbb )
        {
            return true;
        }
#endif
#if USE_PPL
        if ( api == apiPpl )
        {
            return true;
        }
#endif
        // apiNone is always "supported"
        return api == apiNone;
    }
    void setApi( Api api )
    {
        if (isSupported(api))
        {
            m_api = api;
        }
        else
        {
            // no compile time support for selected API, fallback to "none"
            m_api = apiNone;
        }
    }
    // Default thread count of the first available provider (1 when none).
    static int getMaxNumThreads()
    {
#if USE_OPENMP
        return omp_get_max_threads();
#elif USE_PPL
        return concurrency::GetProcessorCount();
#elif USE_TBB
        return tbb::task_scheduler_init::default_num_threads();
#endif
        return 1;   // unreachable when any provider is compiled in
    }
    int getNumThreads() const
    {
        return m_numThreads;
    }
    // Clamp to >= 1 and propagate the thread count to every compiled-in
    // provider.  Returns the value actually set.
    int setNumThreads( int numThreads )
    {
        m_numThreads = ( std::max )( 1, numThreads );
#if USE_OPENMP
        omp_set_num_threads( m_numThreads );
#endif
#if USE_PPL
        {
            using namespace concurrency;
            // Detach any existing scheduler before installing a new policy.
            if ( CurrentScheduler::Id() != -1 )
            {
                CurrentScheduler::Detach();
            }
            SchedulerPolicy policy;
            policy.SetConcurrencyLimits( m_numThreads, m_numThreads );
            CurrentScheduler::Create( policy );
        }
#endif
#if USE_TBB
        // Recreate the TBB scheduler with the new thread count.
        if ( m_tbbSchedulerInit )
        {
            delete m_tbbSchedulerInit;
            m_tbbSchedulerInit = NULL;
        }
        m_tbbSchedulerInit = new tbb::task_scheduler_init( m_numThreads );
#endif
        return m_numThreads;
    }
    // First-time setup: pick an API and the default thread count.
    // Note the ordering: the LAST supported setApi call wins, so when
    // several providers are compiled in, OpenMP is preferred over TBB,
    // which is preferred over PPL.
    void init()
    {
        if (m_numThreads == 0)
        {
#if USE_PPL
            setApi( apiPpl );
#endif
#if USE_TBB
            setApi( apiTbb );
#endif
#if USE_OPENMP
            setApi( apiOpenMP );
#endif
            setNumThreads(getMaxNumThreads());
        }
        else
        {
            setNumThreads(m_numThreads);
        }
    }
    void shutdown()
    {
#if USE_TBB
        if ( m_tbbSchedulerInit )
        {
            delete m_tbbSchedulerInit;
            m_tbbSchedulerInit = NULL;
        }
#endif
    }
private:
    Api m_api;          // active threading provider
    int m_numThreads;   // configured thread count (0 = unconfigured)
#if USE_TBB
    tbb::task_scheduler_init* m_tbbSchedulerInit;   // owned; recreated on setNumThreads
#endif // #if USE_TBB
};
extern TaskManager gTaskMgr;
// Initialise the global task manager (selects an API and thread count).
static void initTaskScheduler()
{
    gTaskMgr.init();
}
// Release any scheduler resources held by the global task manager.
static void cleanupTaskScheduler()
{
    gTaskMgr.shutdown();
}
#if USE_TBB
///
/// TbbBodyAdapter -- Converts a body object that implements the
/// "forLoop(int iBegin, int iEnd) const" function
/// into a TBB compatible object that takes a tbb::blocked_range<int> type.
///
template <class TBody>
struct TbbBodyAdapter
{
    const TBody* mBody;   // adapted body; not owned
    // TBB invokes this with a sub-range; forward it to the body's forLoop.
    void operator()( const tbb::blocked_range<int>& range ) const
    {
        mBody->forLoop( range.begin(), range.end() );
    }
};
#endif // #if USE_TBB
#if USE_PPL
///
/// PplBodyAdapter -- Converts a body object that implements the
/// "forLoop(int iBegin, int iEnd) const" function
/// into a PPL compatible object that implements "void operator()( int ) const"
///
template <class TBody>
struct PplBodyAdapter
{
    const TBody* mBody;   // adapted body; not owned
    int mGrainSize;       // iterations handed to the body per PPL index
    int mIndexEnd;        // exclusive end of the whole range, for clamping
    // PPL invokes this once per grain start index i; the last chunk is
    // clamped so the body never sees indices past mIndexEnd.
    void operator()( int i ) const
    {
        mBody->forLoop( i, (std::min)(i + mGrainSize, mIndexEnd) );
    }
};
#endif // #if USE_PPL
///
/// parallelFor -- interface for submitting work expressed as a for loop to the worker threads
///
// Splits [iBegin, iEnd) into chunks of at most grainSize iterations and
// dispatches them via whichever API gTaskMgr has selected; falls through to
// a serial call on the calling thread when no API is active.
// `body` must expose "void forLoop(int iBegin, int iEnd) const".
template <class TBody>
void parallelFor( int iBegin, int iEnd, int grainSize, const TBody& body )
{
#if USE_OPENMP
    if ( gTaskMgr.getApi() == TaskManager::apiOpenMP )
    {
        // One loop iteration per grain; schedule(static, 1) deals grains
        // round-robin to the threads.
#pragma omp parallel for schedule(static, 1)
        for ( int i = iBegin; i < iEnd; i += grainSize )
        {
            body.forLoop( i, (std::min)( i + grainSize, iEnd ) );
        }
        return;
    }
#endif // #if USE_OPENMP
#if USE_PPL
    if ( gTaskMgr.getApi() == TaskManager::apiPpl )
    {
        // PPL dispatch
        PplBodyAdapter<TBody> pplBody;
        pplBody.mBody = &body;
        pplBody.mGrainSize = grainSize;
        pplBody.mIndexEnd = iEnd;
        // note: MSVC 2010 doesn't support partitioner args, so avoid them
        concurrency::parallel_for( iBegin,
            iEnd,
            grainSize,
            pplBody
            );
        return;
    }
#endif //#if USE_PPL
#if USE_TBB
    if ( gTaskMgr.getApi() == TaskManager::apiTbb )
    {
        // TBB dispatch; simple_partitioner honours grainSize exactly.
        TbbBodyAdapter<TBody> tbbBody;
        tbbBody.mBody = &body;
        tbb::parallel_for( tbb::blocked_range<int>( iBegin, iEnd, grainSize ),
            tbbBody,
            tbb::simple_partitioner()
            );
        return;
    }
#endif // #if USE_TBB
    {
        // run on main thread
        body.forLoop( iBegin, iEnd );
    }
}
|
bpmfutils.h | #pragma once
#include <chrono>
#include <Eigen/Sparse>
#include <Eigen/Dense>
#include <cmath>
#include <algorithm>
#include <fstream>
#include <iomanip>
#include <memory>
#include "sparsetensor.h"
// Wall-clock time in seconds since the clock's epoch, as a double.
inline double tick() {
    const auto since_epoch = std::chrono::high_resolution_clock::now().time_since_epoch();
    return std::chrono::duration<double>(since_epoch).count();
}
// Clamp x into [min, max]: returns min when x < min, max when x > max,
// otherwise x itself.
inline double clamp(double x, double min, double max) {
    if (x < min) {
        return min;
    }
    if (x > max) {
        return max;
    }
    return x;
}
// Smallest and largest *stored* value of a sparse matrix, as (min, max).
// Only explicit nonzeros are inspected; an empty matrix yields (+inf, -inf).
inline std::pair<double, double> getMinMax(const Eigen::SparseMatrix<double> &mat) {
    double lo = INFINITY;
    double hi = -INFINITY;
    for (int k = 0; k < mat.outerSize(); ++k) {
        for (Eigen::SparseMatrix<double>::InnerIterator it(mat, k); it; ++it) {
            const double v = it.value();
            lo = std::min(lo, v);
            hi = std::max(hi, v);
        }
    }
    return std::make_pair(lo, hi);
}
// Distribute num_latent work items over num_nodes as evenly as possible,
// preferring chunks of 2 when the average share allows it.  work[] must
// have room for num_nodes entries; on return the entries sum to num_latent.
inline void split_work_mpi(int num_latent, int num_nodes, int* work) {
    const double avg_work = num_latent / (double) num_nodes;
    const int work_unit = (avg_work >= 2.0) ? 2 : 1;
    const int min_work = work_unit * (int) floor(avg_work / work_unit);
    // Every node gets the common base share first.
    for (int node = 0; node < num_nodes; node++) {
        work[node] = min_work;
    }
    // Hand out the remainder round-robin, one work_unit at a time.
    int remaining = num_latent - min_work * num_nodes;
    for (int node = 0; remaining > 0; node = (node + 1) % num_nodes) {
        const int take = std::min(remaining, work_unit);
        work[node] += take;
        remaining -= take;
    }
}
// Fill sparse matrix X from coordinate arrays: N entries (rows[n], cols[n])
// with value values[n].  X must already be sized; duplicates are summed by
// setFromTriplets.
inline void sparseFromIJV(Eigen::SparseMatrix<double> &X, int* rows, int* cols, double* values, int N) {
    std::vector<Eigen::Triplet<double>> entries;
    entries.reserve(N);
    for (int i = 0; i < N; ++i) {
        entries.emplace_back(rows[i], cols[i], values[i]);
    }
    X.setFromTriplets(entries.begin(), entries.end());
}
// Integer-valued overload: fill X from coordinate arrays (rows[n], cols[n],
// values[n]).  Duplicates are summed by setFromTriplets.
inline void sparseFromIJV(Eigen::SparseMatrix<int> &X, int* rows, int* cols, int* values, int N) {
    std::vector<Eigen::Triplet<int>> entries;
    entries.reserve(N);
    for (int i = 0; i < N; ++i) {
        entries.emplace_back(rows[i], cols[i], values[i]);
    }
    X.setFromTriplets(entries.begin(), entries.end());
}
// Fill X from an N x 2 index matrix (row, col per row) and a value vector.
// Throws std::runtime_error when the shapes are inconsistent.
inline void sparseFromIJV(Eigen::SparseMatrix<double> &X, Eigen::MatrixXi &idx, Eigen::VectorXd &values) {
    if (idx.rows() != values.size()) {
        throw std::runtime_error("sparseFromIJV: idx.rows() must equal values.size().");
    }
    if (idx.cols() != 2) {
        throw std::runtime_error("sparseFromIJV: idx.cols() must be equal to 2.");
    }
    const int N = values.size();
    std::vector<Eigen::Triplet<double>> entries;
    entries.reserve(N);
    for (int i = 0; i < N; ++i) {
        entries.emplace_back(idx(i, 0), idx(i, 1), values(i));
    }
    X.setFromTriplets(entries.begin(), entries.end());
}
// Integer-valued overload: fill X from an N x 2 index matrix and an integer
// value vector.  Throws std::runtime_error on shape mismatch.
inline void sparseFromIJV(Eigen::SparseMatrix<int> &X, Eigen::MatrixXi &idx, Eigen::VectorXi &values) {
    if (idx.rows() != values.size()) {
        throw std::runtime_error("sparseFromIJV: idx.rows() must equal values.size().");
    }
    if (idx.cols() != 2) {
        throw std::runtime_error("sparseFromIJV: idx.cols() must be equal to 2.");
    }
    const int N = values.size();
    std::vector<Eigen::Triplet<int>> entries;
    entries.reserve(N);
    for (int i = 0; i < N; ++i) {
        entries.emplace_back(idx(i, 0), idx(i, 1), values(i));
    }
    X.setFromTriplets(entries.begin(), entries.end());
}
// Square of x.
inline double square(double x)
{
    return x * x;
}
// RMSE of the current sample's predictions and of the running-average
// predictions over the test entries in P.  Updates `predictions` (running
// mean per test entry) and `predictions_var` (running sum of squared
// deviations) in place using the online update; n is the 0-based epoch.
// Returns (rmse of this sample, rmse of the averaged predictor).
inline std::pair<double,double> eval_rmse(Eigen::SparseMatrix<double> & P, const int n, Eigen::VectorXd & predictions, Eigen::VectorXd & predictions_var, const Eigen::MatrixXd &sample_m, const Eigen::MatrixXd &sample_u, double mean_rating)
{
    double se = 0.0, se_avg = 0.0;
    // Parallel over outer columns of P.  `idx` starts at each column's
    // offset into the nonzero arrays, so different k never touch the same
    // predictions[] slot: the idx++ below is race-free.
#pragma omp parallel for schedule(dynamic,8) reduction(+:se, se_avg)
    for (int k = 0; k < P.outerSize(); ++k) {
        int idx = P.outerIndexPtr()[k];
        for (Eigen::SparseMatrix<double>::InnerIterator it(P,k); it; ++it) {
            // Prediction = dot product of the two latent factor columns + global mean.
            const double pred = sample_m.col(it.col()).dot(sample_u.col(it.row())) + mean_rating;
            se += square(it.value() - pred);
            // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
            double pred_avg;
            if (n == 0) {
                pred_avg = pred;
            } else {
                double delta = pred - predictions[idx];
                pred_avg = (predictions[idx] + delta / (n + 1));
                predictions_var[idx] += delta * (pred - pred_avg);
            }
            se_avg += square(it.value() - pred_avg);
            predictions[idx++] = pred_avg;
        }
    }
    const unsigned N = P.nonZeros();
    // NOTE(review): N == 0 produces NaN results - confirm callers guard this.
    const double rmse = sqrt( se / N );
    const double rmse_avg = sqrt( se_avg / N );
    return std::make_pair(rmse, rmse_avg);
}
std::pair<double,double> eval_rmse_tensor(
SparseMode & sparseMode,
const int Nepoch,
Eigen::VectorXd & predictions,
Eigen::VectorXd & predictions_var,
std::vector< std::unique_ptr<Eigen::MatrixXd> > & samples,
double mean_value);
// Per-row mean and variance of X (D rows x N columns): mean(d) averages row
// d over all columns; var(d) is the population variance (divided by N, not
// N-1).  mean and var are resized to D and overwritten.
// Fix: X is now taken by const reference - the original signature passed
// the whole matrix by value, copying it on every call; callers see no
// difference since the parameter was already const.
inline void row_mean_var(Eigen::VectorXd & mean, Eigen::VectorXd & var, const Eigen::MatrixXd & X) {
    const int N = X.cols();
    const int D = X.rows();
    mean.resize(D);
    var.resize(D);
    mean.setZero();
    var.setZero();
    // Pass 1: per-thread partial sums, reduced under a critical section.
#pragma omp parallel
    {
        Eigen::VectorXd tmp(D);
        tmp.setZero();
#pragma omp for schedule(static)
        for (int i = 0; i < N; i++) {
            for (int d = 0; d < D; d++) {
                tmp(d) += X(d, i);
            }
        }
#pragma omp critical
        {
            mean += tmp;
        }
    }
    // computing mean
    mean /= N;
    // Pass 2: accumulate squared deviations from the row means the same way.
#pragma omp parallel
    {
        Eigen::VectorXd tmp(D);
        tmp.setZero();
#pragma omp for schedule(static)
        for (int i = 0; i < N; i++) {
            for (int d = 0; d < D; d++) {
                tmp(d) += square(X(d, i) - mean(d));
            }
        }
#pragma omp critical
        {
            var += tmp;
        }
    }
    var /= N;   // NOTE(review): N == 0 would yield NaNs - confirm callers pass N > 0
}
inline void writeToCSVfile(std::string filename, Eigen::MatrixXd matrix) {
const static Eigen::IOFormat csvFormat(6, Eigen::DontAlignCols, ",", "\n");
std::ofstream file(filename.c_str());
file << matrix.format(csvFormat);
}
// Format a_value with n significant digits (default stream formatting,
// n defaults to 6).
inline std::string to_string_with_precision(const double a_value, const int n = 6)
{
    std::ostringstream stream;
    stream << std::setprecision(n) << a_value;
    return stream.str();
}
// Area under the ROC curve for scores `pred` against binary labels `test`
// (labels assumed 0/1 - TODO confirm).  Returns NAN for empty input.
inline double auc(Eigen::VectorXd & pred, Eigen::VectorXd & test)
{
    Eigen::VectorXd stack_x(pred.size());
    Eigen::VectorXd stack_y(pred.size());
    double auc = 0.0;
    if (pred.size() == 0) {
        return NAN;
    }
    // Sort example indices by ascending prediction score.
    // NOTE(review): `unsigned int i < pred.size()` mixes signedness with
    // Eigen's signed Index type - harmless for sane sizes, but worth fixing.
    std::vector<unsigned int> permutation( pred.size() );
    for(unsigned int i = 0; i < pred.size(); i++) {
        permutation[i] = i;
    }
    std::sort(permutation.begin(), permutation.end(), [&pred](unsigned int a, unsigned int b) { return pred[a] < pred[b];});
    double NP = test.sum();             // number of positives
    double NN = test.size() - NP;       // number of negatives
    //Build stack_x and stack_y
    // stack_x[i]: cumulative positives among the i+1 lowest-scored examples;
    // stack_y[i]: cumulative negatives.
    stack_x[0] = test[permutation[0]];
    stack_y[0] = 1-stack_x[0];
    for(int i=1; i < pred.size(); i++) {
        stack_x[i] = stack_x[i-1] + test[permutation[i]];
        stack_y[i] = stack_y[i-1] + 1 - test[permutation[i]];
    }
    // Sum rectangle areas of the ROC step curve.
    for(int i=0; i < pred.size() - 1; i++) {
        auc += (stack_x(i+1) - stack_x(i)) * stack_y(i+1); //TODO:Make it Eigen
    }
    // NOTE(review): NP == 0 or NN == 0 divides by zero (yields inf/NaN).
    return auc / (NP*NN);
}
|
GB_binop__isge_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int16)
// A*D function (colscale): GB (_AxD__isge_int16)
// D*A function (rowscale): GB (_DxB__isge_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int16)
// C=scalar+B GB (_bind1st__isge_int16)
// C=scalar+B' GB (_bind1st_tran__isge_int16)
// C=A+scalar GB (_bind2nd__isge_int16)
// C=A'+scalar GB (_bind2nd_tran__isge_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_INT16 || GxB_NO_ISGE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The loop body is expanded from
// the shared template using this file's GB_* macros (generated code).
void GB (_Cdense_ewise3_noaccum__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse (generated code; loop body comes
// from the subassign-23 template).
GrB_Info GB (_Cdense_accumB__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix (generated code).
GrB_Info GB (_Cdense_accumb__isge_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, kept verbatim (generated file).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D (generated code).
GrB_Info GB (_AxD__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D (generated code).
GrB_Info GB (_DxB__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B (generated code).  For
// eWiseUnion, alpha/beta stand in for entries missing from A/B.
GrB_Info GB (_AaddB__isge_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // unpack the typed alpha/beta scalars for the union variant
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (with optional mask), where C is sparse/hyper
// (generated code; method 08 template).
GrB_Info GB (_AemultB_08__isge_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
// (generated code).  GB_BINOP_FLIP is 0 for ISGE, so only the non-flipped
// template expansion below is compiled in.
GrB_Info GB (_AemultB_02__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B with M sparse/hyper and A, B bitmap/full
// (generated code; method 04 template).
GrB_Info GB (_AemultB_04__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (with optional mask) where C is bitmap (generated code).
GrB_Info GB (_AemultB_bitmap__isge_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply the ISGE operator with the scalar bound as the
// first argument (generated code).  Bb is B's bitmap (NULL when B is full).
GrB_Info GB (_bind1st__isge_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply the ISGE operator with the scalar bound as the
// second argument (generated code).  Ab is A's bitmap (NULL when A is full).
GrB_Info GB (_bind2nd__isge_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op(x, A'): transpose A and apply ISGE with the scalar bound first
// (generated code; uses the GB_CAST_OP macro defined just above).
GrB_Info GB (_bind1st_tran__isge_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// C = op(A', y): transpose A and apply ISGE with the scalar bound second
// (generated code; uses the GB_CAST_OP macro defined just above).
GrB_Info GB (_bind2nd_tran__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
vmul.c | /*
This file is part of HiParTI!.
HiParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
HiParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with HiParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <stdio.h>
/**
* SpMV, y = Ax
*/
int ptiSparseMatrixMulVectorCSR(ptiValueVector * y, ptiSparseMatrixCSR *csrmtx, ptiValueVector * x)
{
    // CSR SpMV: accumulates into y (y += A*x).
    // NOTE(review): y is not cleared here - confirm callers zero it first.
#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(ptiIndex i = 0; i < csrmtx->nrows; ++i) {
        // Row i occupies [rowptr[i], rowptr[i+1]) in the value/colind arrays.
        for(ptiNnzIndex z = csrmtx->rowptr.data[i]; z < csrmtx->rowptr.data[i+1]; ++z) {
            ptiIndex col = csrmtx->colind.data[z];
            y->data[i] += csrmtx->values.data[z] * x->data[col];
        }
    }
    return 0;
}
#ifdef HIPARTI_USE_OPENMP
/**
 * OpenMP CSR SpMV: y += csrmtx * x, one row per loop iteration.
 * Rows are independent, so the parallel loop needs no synchronization.
 * Always returns 0.
 */
int ptiOmpSparseMatrixMulVectorCSR(ptiValueVector * y, ptiSparseMatrixCSR *csrmtx, ptiValueVector * x)
{
#pragma omp parallel for
    for(ptiIndex row = 0; row < csrmtx->nrows; ++row) {
        const ptiNnzIndex row_end = csrmtx->rowptr.data[row + 1];
        for(ptiNnzIndex nz = csrmtx->rowptr.data[row]; nz < row_end; ++nz) {
            y->data[row] += csrmtx->values.data[nz] * x->data[csrmtx->colind.data[nz]];
        }
    }
    return 0;
}
/**
 * SpMV with per-thread output buffers: allocates one dense buffer per thread,
 * runs the buffered kernel (ptiOmpSparseMatrixMulVectorCSR_Reduce), then
 * frees the buffers.
 * \return 0 on success, -1 if the buffer array cannot be allocated.
 */
int ptiOmpSparseMatrixMulVectorCSRReduce(ptiValueVector * y, const ptiSparseMatrixCSR *mtx, ptiValueVector * x){
    /* omp_get_max_threads() returns the team size an upcoming parallel region
       will use; this avoids spawning a throw-away parallel region just to
       read omp_get_num_threads(). */
    int nthreads = omp_get_max_threads();
    ptiValueVector * ybufs = malloc(nthreads * sizeof *ybufs);
    if(ybufs == NULL) {
        return -1;  /* allocation failure: leave y untouched */
    }
    for(int t=0; t<nthreads; ++t) {
        ptiNewValueVector(&ybufs[t], mtx->nrows, mtx->nrows);
        ptiConstantValueVector(&ybufs[t], 0);
    }
    ptiOmpSparseMatrixMulVectorCSR_Reduce(y, ybufs, mtx, x);
    for(int t=0; t<nthreads; ++t) {
        ptiFreeValueVector(&ybufs[t]);
    }
    free(ybufs);
    return 0;
}
/**
 * Buffered SpMV kernel: each thread accumulates into its own dense buffer
 * ybufs[tid] (avoiding write conflicts on y), then the per-thread buffers are
 * reduced into y in a second parallel pass.
 * \param ybufs one zero-initialized buffer of length >= y->len per thread
 * \return always 0
 */
int ptiOmpSparseMatrixMulVectorCSR_Reduce(ptiValueVector *y, ptiValueVector * ybufs, const ptiSparseMatrixCSR *csrmtx, ptiValueVector * x)
{
    int nthreads;
    #pragma omp parallel
    nthreads=omp_get_num_threads();
    #pragma omp parallel for // schedule(static)
    for(ptiIndex i = 0; i < csrmtx->nrows; ++i) {
        /* Hoisted out of the inner loop: the executing thread cannot change
           within one iteration of the parallel loop. */
        int tid = omp_get_thread_num();
        for(ptiNnzIndex z = csrmtx->rowptr.data[i]; z < csrmtx->rowptr.data[i+1]; ++z) {
            ptiIndex col = csrmtx->colind.data[z];
            ybufs[tid].data[i] += csrmtx->values.data[z] * x->data[col];
        }
    }
    /* Reduction: y[r] += sum over threads of ybufs[t][r] */
    #pragma omp parallel for schedule(static)
    for(ptiIndex r=0; r<y->len; ++r){
        for (int t=0; t<nthreads; ++t){
            y->data[r] += ybufs[t].data[r];
        }
    }
    return 0;
}
#endif
|
MathTools.h | /**
*
* \copyright
* Copyright (c) 2012-2019, OpenGeoSys Community (http://www.opengeosys.org)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.org/project/license
*
*/
#pragma once
#include <boost/math/constants/constants.hpp>
#include <cstddef>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace MathLib
{
/**
* standard inner product in R^N
* \param v0 array of type T representing the vector
* \param v1 array of type T representing the vector
* */
// Generic N-dimensional inner product.  Requires N >= 1: the accumulator is
// seeded with v0[0]*v1[0] before the loop.
// NOTE(review): the OpenMP reduction only pays off for large N; for small
// fixed N the fork/join overhead likely dominates — confirm intended use.
template<typename T, int N> inline
T scalarProduct(T const * const v0, T const * const v1)
{
    T res (v0[0] * v1[0]);
#pragma omp parallel for reduction (+:res)
    for (int k = 1; k < N; k++)
        res += v0[k] * v1[k];
    return res;
}
/// Specialization for 3D double vectors: fully unrolled dot product.
/// The left-to-right addition order matches the loop form of the primary
/// template, so results are bitwise identical.
template <> inline
double scalarProduct<double,3>(double const * const v0, double const * const v1)
{
    return v0[0] * v1[0] + v0[1] * v1[1] + v0[2] * v1[2];
}
// Runtime-length inner product of two arrays of length n.
// Requires n >= 1 (v0[0]*v1[0] seeds the accumulator before the loop).
template <typename T>
inline T scalarProduct(T const* const v0, T const* const v1, int const n)
{
    T res (v0[0] * v1[0]);
#pragma omp parallel for reduction (+:res)
    for (int k = 1; k < n; k++)
        res += v0[k] * v1[k];
    return res;
}
/**
* calcProjPntToLineAndDists computes the orthogonal projection
* of a point p to the line described by the points a and b,
* \f$g(\lambda) = a + \lambda (b - a)\f$,
* the distance between p and the projected point
* and the distances between the projected point and the end
* points a, b of the line
* \param p the (mesh) point
* \param a first point of line
* \param b second point of line
 * \param lambda the line parameter \f$\lambda\f$ such that \f$g(\lambda)\f$ is the orthogonal projection of p onto the line
* \param d0 distance to the line point a
* \returns the distance between p and the orthogonal projection of p
*/
double calcProjPntToLineAndDists(const double p[3], const double a[3],
const double b[3], double &lambda, double &d0);
/** squared dist between double arrays p0 and p1 (size of arrays is 3) */
/** Squared Euclidean distance between the 3D points p0 and p1.
 *  Component differences are squared and summed left to right, matching the
 *  scalarProduct<double,3> formulation this replaces. */
inline
double sqrDist(const double* p0, const double* p1)
{
    const double dx = p1[0] - p0[0];
    const double dy = p1[1] - p0[1];
    const double dz = p1[2] - p0[2];
    return dx * dx + dy * dy + dz * dz;
}
/**
* Let \f$p_0, p_1, p_2 \in R^3\f$. The function getAngle
* computes the angle between the edges \f$(p_0,p_1)\f$ and \f$(p_1,p_2)\f$
* @param p0 start point of edge 0
* @param p1 end point of edge 0 and start point of edge 1
* @param p2 end point of edge 1
* @return the angle between the edges
*/
double getAngle (const double p0[3], const double p1[3], const double p2[3]);
/// converts the given degrees to radians
/// Converts an angle given in degrees to radians.
inline double to_radians(double degrees) {
    const double pi = boost::math::constants::pi<double>();
    return degrees * pi / 180.;  // same evaluation order as (degrees*pi)/180
}
/// Clamps \c variable into the closed interval [lower_bound, upper_bound].
/// Values below the interval map to lower_bound, values above to upper_bound,
/// everything else is returned unchanged.
template<typename Type> Type limitValueInInterval(const Type variable,
                                                  const Type lower_bound,
                                                  const Type upper_bound)
{
    return variable < lower_bound ? lower_bound
         : variable > upper_bound ? upper_bound
         : variable;
}
} // namespace
|
matrixmultiply-ompacc.c | /*
Naive matrix-matrix multiplication(mmm)
By C. Liao
*/
#include <stdio.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define N 1024
#define M 1024
#define K 1024
#define REAL float
int i,j,k;
REAL a[N][M],b[M][K],c[N][K], c2[N][K];
int init();
int mmm();
int mmm2();
int verify();
/* Driver: initialize inputs, run the offloaded multiply (mmm) and the host
   reference (mmm2), then compare their checksums in verify(). */
int main(void)
{
    init();
    mmm();
    mmm2();
    return verify();
}
/* Fill a and b with deterministic values and zero both result arrays.
   NOTE(review): b is M x K but its values are scaled by /N/M (not /M/K);
   presumably intentional (only reproducibility matters) — confirm. */
int init()
{
    for (i=0;i<N;i++)
        for(j=0;j<M;j++)
            a[i][j]=3.0*i*j/N/M;
    for (i=0;i<M;i++)
        for(j=0;j<K;j++)
            b[i][j]=5.0*j*i/N/M;
    for (i=0;i<N;i++)
        for(j=0;j<K;j++)
        {
            c[i][j]=0.0;
            c2[i][j]=0.0;
        }
    return 0;
}
/*
TODO: try different i,j,k orders
a b e f a*e+ b*g , a*f+ b*h
c d x g h = c*e+ d*g, c*f+ d*h
*/
/* Offloaded matrix multiply: c += a*b on the default target device.
   The statically-sized global arrays are mapped wholesale (c both ways,
   a and b to-device only); the i loop is parallelized on the device.
   i, j, k are file-scope globals, hence the explicit private() clause. */
int mmm()
{
    //For static arrays with known dimension info. , no array section info. is needed
    //#pragma omp target map(tofrom:c[0:N][0:M]), map(to:a[0:N][0:M],b[0:M][0:K])
#pragma omp target map(tofrom:c), map(to:a,b)
#pragma omp parallel for private(i,j,k)
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            for (k = 0; k < K; k++)
                c[i][j]= c[i][j]+a[i][k]*b[k][j];
    return 0;
}
/* Host-side sequential reference multiply: c2 += a*b, identical loop order
   to mmm() so the two results can be compared.  Uses the same file-scope
   loop counters as the rest of this translation unit. */
int mmm2()
{
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            for (k = 0; k < K; k++)
                c2[i][j] += a[i][k]*b[k][j];
    return 0;
}
/* Compare checksums of the offloaded result (c) and the host reference (c2).
   Floating-point results may legitimately differ slightly between host and
   device (FMA contraction, different accumulation), so use a relative
   tolerance instead of the fragile exact equality `sum == sum2`. */
int verify()
{
    REAL sum=0.0, sum2=0.0;
    for (i=0;i<N;i++)
        for(j=0;j<K;j++)
        {
            sum+=c[i][j];
            sum2+=c2[i][j];
        }
    printf("sum of c[i][j] is %f\n",sum);
    printf("sum of c2[i][j] is %f\n",sum2);
    /* relative error check, avoiding division by ~0 */
    REAL diff = sum - sum2;
    if (diff < 0) diff = -diff;
    REAL denom = (sum < 0 ? -sum : sum);
    if (denom < 1.0) denom = 1.0;
    assert (diff / denom < 1e-4);
    return 0;
}
|
GB_unop__acosh_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acosh_fc64_fc64)
// op(A') function: GB (_unop_tran__acosh_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = cacosh (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cacosh (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = cacosh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOSH || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cacosh (Ax [p]) for all entries.  Cx and Ax may be aliased.
// If Ab is non-NULL, A is bitmap and only positions with Ab [p] != 0 hold
// valid entries (the caller has already copied A->b into C->b).
GrB_Info GB (_unop_apply__acosh_fc64_fc64)
(
    GxB_FC64_t *Cx, // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: every one of the anz positions holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cacosh (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cacosh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cacosh (A') : transpose A with the unary op applied on the fly.
// The transpose loops live in the included template GB_unop_transpose.c,
// specialized via the GB_* macros defined at the top of this file.
GrB_Info GB (_unop_tran__acosh_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
#define dataSize 10000
int data[dataSize];
/* Odd-even transposition sort benchmark: for each thread count 1..15, run the
   sort 4 times on fresh random data and log the elapsed time to result.txt.
   Threads are created anew for each parallel region. */
int main(int argc, char* argv[])
{
    int i, j;
    struct timeval start,end;
    int thread = 1;
    FILE * fpt;
    fpt = fopen("result.txt","w");
    if (fpt == NULL) {              /* fopen can fail; report and bail out */
        perror("fopen result.txt");
        return 1;
    }
    srand(2);
    for(thread =1;thread<16;++thread)
    {
        for(int k=0;k<4;++k)
        {
            /* rand() is not thread-safe, so fill the array serially; this
               also makes the srand(2) seed actually give reproducible data. */
            for (i = 0; i < dataSize; i++)
                data[i] = rand();
            gettimeofday(&start, NULL);
            for (i = 0; i < dataSize; i++) {
                if (i % 2) {
                    /* odd phase: compare/XOR-swap (j, j+1) pairs at odd j */
#pragma omp parallel for num_threads(thread) default(none) shared(data) private(j)
                    for (j = 1; j < dataSize - 1; j += 2) {
                        if (data[j + 1] < data[j])
                            data[j + 1] ^= data[j], data[j] ^= data[j + 1], data[j + 1] ^= data[j];
                    }
                } else {
                    /* even phase: compare/XOR-swap (j-1, j) pairs at odd j */
#pragma omp parallel for num_threads(thread) default(none) shared(data) private(j)
                    for (j = 1; j < dataSize; j += 2) {
                        if (data[j - 1] > data[j])
                            data[j - 1] ^= data[j], data[j] ^= data[j - 1], data[j - 1] ^= data[j];
                    }
                }
            }
            gettimeofday(&end, NULL);
            const long persec = 1000000;
            double costtime = (double) ((end.tv_sec - start.tv_sec) * persec + end.tv_usec - start.tv_usec) / persec;
            fprintf(fpt, "thread num:%d finish cost time: %.6f s\n", thread, (double) costtime);
        }
    }
    fclose(fpt);
    return 0;
}
/*
int main1(int argc, char* argv[])// 预先产生需要的线程
{
int i, j, threadReal, data[dataSize];
clock_t time;
if (argc > 1 && *argv[1] > '0'&& *argv[1] < '9')
threadReal = atoi(argv[1]);
else
threadReal = thread;
srand(2);
#pragma omp parallel for num_threads(thread)
for (i = 0; i < dataSize; i++)
data[i] = rand();
time = clock();
#pragma omp parallel num_threads(thread) default(none) shared(data) private(i, j) // 预先说明并行的参数,特别是规定使用的线程数
for (i = 0; i < dataSize; i++) // 注意这层没有 for 子句,且 i 也作为私有变量
{
if (i % 2)
{
#pragma omp for// 每次并行时使用预先规定的线程数和参数,使用完毕后保留,直到退出外层循环再销毁
for (j = 1; j < dataSize - 1; j += 2)
{
if (data[j + 1] < data[j])
data[j + 1] ^= data[j], data[j] ^= data[j + 1], data[j + 1] ^= data[j];
}
}
else
{
#pragma omp for
for (j = 1; j < dataSize; j += 2)
{
if (data[j - 1] > data[j])
data[j - 1] ^= data[j], data[j] ^= data[j - 1], data[j - 1] ^= data[j];
}
}
}
time = clock() - time;
printf("Sort finished!\n");
for (i = 0; i < dataSize - 1; i++)
{
if (data[i] > data[i + 1])
{
printf("Sort error at %d, data[i] = %d,data[i+1] = %d, time = %ld ms\n", i, data[i], data[i + 1], time);
break;
}
}
if (i == dataSize - 1)
printf("Sort correct, time = %ld ms\n", time);
getchar();
return 0;
}
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <sys/time.h>
enum bool
{
false,true
};
#define dataSize 20000
#define threads 4
int data[dataSize];
void displayArray( int* array)
{
int i;
for(i=0;i<dataSize;++i)
{
if(0==i%1000) {
printf("%d ", *(array + i));
}
}
}
enum bool checkArray(int* a,int length,enum bool reverse)
{
int i;
if(!reverse)
{
for(i=0;i<length-1;++i)
if(a[i]>a[i+1])
return 0;
}
else{
for(i=0;i<length-1;++i)
if(a[i]<a[i+1])
return 0;
}
return 1;
}
int main() {
enum bool issorted = false;
struct timeval start,end;
int i=0;
for(int i=0;i<dataSize;++i)
data[i]= random()%dataSize;
int thread_num[4]={0,0,0,0};
gettimeofday(&start,NULL);
omp_set_num_threads(threads);
while(!issorted){
issorted=true;
#pragma omp parallel for num_threads(threads) private(i) shared(data)
for (i = 0; i < dataSize - 1; i += 2) {
if (data[i] > data[i + 1]) {
int temp;
temp = data[i];
data[i] = data[i + 1];
data[i + 1] = temp;
issorted = false;
}
}
#pragma omp parallel for num_threads(threads) private(i) shared(data)
for ( i = 1; i < dataSize - 1; i += 2)
if (data[i] > data[i + 1]) {
int temp;
temp = data[i];
data[i] = data[i + 1];
data[i + 1] = temp;
issorted = false;
}
}
gettimeofday(&end,NULL);
const long persec =1000000;
double costtime =(double)((end.tv_sec-start.tv_sec)*persec+end.tv_usec-start.tv_usec)/persec;
if(1==checkArray(data,dataSize,false))
printf("correct\n");
else
printf("uncorrect\n");
printf("%d %d %d %d\n",thread_num[0],thread_num[1],thread_num[2],thread_num[3]);
printf("finish\ncost time: %.6fs",(double)costtime);
return 0;
}*/ |
5442.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Deterministically initialize A: A[i][j] = (i + j) / nj, cast through
   DATA_TYPE so the division matches the benchmark's precision. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int r, c;
  for (r = 0; r < ni; r++)
    for (c = 0; c < nj; c++)
      A[r][c] = ((DATA_TYPE) (r + c) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  /* Dump B to stderr, one value per fprintf, with a newline every 20th
     linear index (polybench DCE convention). */
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  /* 3x3 convolution over the interior of A; the border rows/columns of B
     are never written.  The scop markers delimit the region for polyhedral
     tools — keep them adjacent to the loop nest. */
  int i, j;
#pragma scop
  /* collapse(2) fuses the perfectly nested i/j loops; j is declared outside
     the nest, so it must be explicitly private. */
#pragma omp parallel for private(j) collapse(2) schedule(dynamic, 16) num_threads(4)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}
/* Benchmark driver: allocate, initialize, time the kernel, and print the
   live-out array through polybench_prevent_dce so the kernel is not
   optimized away. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
GB_binop__bor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bor_int64
// A.*B function (eWiseMult): GB_AemultB__bor_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bor_int64
// C+=b function (dense accum): GB_Cdense_accumb__bor_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_int64
// C=scalar+B GB_bind1st__bor_int64
// C=scalar+B' GB_bind1st_tran__bor_int64
// C=A+scalar GB_bind2nd__bor_int64
// C=A'+scalar GB_bind2nd_tran__bor_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x) | (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT64 || GxB_NO_BOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The loop body
// comes from the included template, specialized by the GB_* macros above.
GrB_Info GB_Cdense_ewise3_noaccum__bor_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the slice arrays partition B's
// entries across the ntasks parallel tasks.
GrB_Info GB_Cdense_accumB__bor_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar passed as untyped bytes.
GrB_Info GB_Cdense_accumb__bor_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd for the BOR operator on int64: C = A+B (union of patterns),
// optionally under mask M.  All traversal logic is in GB_add_template.c.
GrB_Info GB_AaddB__bor_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult for the BOR operator on int64: C = A.*B (intersection of
// patterns), optionally under mask M.  Traversal is in GB_emult_template.c.
GrB_Info GB_AemultB__bor_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for all anz positions, with the scalar x bound as the
// first operand.  Cx and Bx may be aliased (elementwise, in place).
GrB_Info GB_bind1st__bor_int64
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t bij = Bx [p] ;
        Cx [p] = (x) | (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for all anz positions, with the scalar y bound as the
// second operand.  Cx and Ax may be aliased (elementwise, in place).
GrB_Info GB_bind2nd__bor_int64
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;
        Cx [p] = (aij) | (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x) | (aij) ; \
}
// C = x | A' : transpose A while applying the operator with the scalar x
// bound as the first operand.  Transpose loops come from GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__bor_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of this file
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij) | (y) ; \
}
// C = A' | y : transpose A while applying the operator with the scalar y
// bound as the second operand.  Transpose loops come from GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__bor_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
test.c | #include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
// enable tests
#define CHECK 1
#define FULL 1
#define FULL_ZERO 0 /* use zero ptrs, not legal (yet) */
#define FULL_S 0 /* need struct support */
#define OFFSET 1
#define OFFSET_S 0 /* need struct support */
#define N (992)
#define INIT() INIT_LOOP(N, {A[i] = 0; C[i] = 1; D[i] = i; E[i] = -i; })
int main(void){
  // Exercises "#pragma omp target update to/from" in three scenarios:
  // FULL (plain array sections), FULL_ZERO (zero-length sections, disabled),
  // and OFFSET (pointers rebased so sections start at nonzero offsets).
  // The "// CHECK:" comments are output-check markers — leave them in place.
#if CHECK
  check_offloading();
#endif
  int fail;
  double A[N], B[N], C[N], D[N], E[N];
  double *pA, *pB, *pC, *pD, *pE;
  // map ptrs
  pA = &A[0];
  pB = &B[0];
  pC = &C[0];
  pD = &D[0];
  pE = &E[0];
#if FULL
  INIT();
  // Device computes A = C + D + 1; "update from" copies the result back.
  #pragma omp target data map(from: pA[0:N]) map(to: pC[0:N], pD[0:N]) device(1)
  {
    #pragma omp target device(1)
    {
      #pragma omp parallel for schedule(static,1)
      for(int i = 0; i < N; i++)
        pA[i] = pC[i] + pD[i] + 1;
    }
    #pragma omp target update from(pA[0:N]) device(1)
    // CHECK: Succeeded in "update from"
    fail = 0;
    VERIFY(0, N, A[i], (double)(i+2));
    if (fail) {
      printf ("Test update from: Failed\n");
    } else {
      printf ("Test update from: Succeeded\n");
    }
    // Now modify host arrays C and D
    for(int i = 0; i < N; i++){
      C[i] = 2;
      D[i] = i + 1;
    }
    // "update to" pushes the modified host data; recompute and verify.
    #pragma omp target update to(pC[0:N], pD[0:N]) device(1)
    #pragma omp target device(1)
    {
      #pragma omp parallel for schedule(static,1)
      for(int i = 0; i < N; i++)
        pA[i] = pC[i] + pD[i] + 1;
    }
    #pragma omp target update from(pA[0:N]) device(1)
    // CHECK: Succeeded in "update to"
    fail = 0;
    VERIFY(0, N, A[i], (double)(i+4));
    if (fail) {
      printf ("Test update to: Failed\n");
    } else {
      printf ("Test update to: Succeeded\n");
    }
  }
#endif
#if FULL_ZERO
  INIT();
  #pragma omp target data map(from: pA[0:N]) map(to: pC[0:N], pD[0:N]) device(1)
  {
    #pragma omp target device(1)
    {
      #pragma omp parallel for schedule(static,1)
      for(int i = 0; i < N; i++)
        pA[i] = pC[i] + pD[i] + 1;
    }
    #pragma omp target update from(pA[0:0]) device(1)
    // CHECK: Succeeded in "update from"
    fail = 0;
    VERIFY(0, N, A[i], (double)(i+2));
    if (fail) {
      printf ("Test update from with zero-length ptrs: Failed\n");
    } else {
      printf ("Test update from with zero-length ptrs: Succeeded\n");
    }
    // Now modify host arrays C and D
    for(int i = 0; i < N; i++){
      C[i] = 2;
      D[i] = i + 1;
    }
    #pragma omp target update to(pC[0:0], pD[0:0]) device(1)
    #pragma omp target device(1)
    {
      #pragma omp parallel for schedule(static,1)
      for(int i = 0; i < N; i++)
        pA[i] = pC[i] + pD[i] + 1;
    }
    #pragma omp target update from(pA[0:0]) device(1)
    // CHECK: Succeeded in "update to"
    fail = 0;
    VERIFY(0, N, A[i], (double)(i+4));
    if (fail) {
      printf ("Test update to with zero-length ptrs: Failed\n");
    } else {
      printf ("Test update to with zero-length ptrs: Succeeded\n");
    }
  }
#endif
#if OFFSET
  // Rebase the pointers so that element k of each array is addressed as
  // p[k + offset]; the sections below must account for the same offsets.
  pA = pA - 100;
  pC = pC - 200;
  pD = pD - 300;
  INIT();
  #pragma omp target data map(from: pA[100:N]) map(to: pC[200:N], pD[300:N])\
                          device(1)
  {
    #pragma omp target map(from: pA[100:N]) map(to: pC[200:N], pD[300:N])\
                       device(1)
    {
      #pragma omp parallel for schedule(static,1)
      for(int i = 0; i < N; i++)
        pA[i+100] = pC[i+200] + pD[i+300] + 1;
    }
    #pragma omp target update from(pA[100:N]) device(1)
    // CHECK: Succeeded in "update from"
    fail = 0;
    VERIFY(0, N, A[i], (double)(i+2));
    if (fail) {
      printf ("Test update from with offsets: Failed\n");
    } else {
      printf ("Test update from with offsets: Succeeded\n");
    }
    // Now modify host arrays C and D
    for(int i = 0; i < N; i++){
      C[i] = 2;
      D[i] = i + 1;
    }
    #pragma omp target update to(pC[200:N], pD[300:N]) device(1)
    #pragma omp target map(from: pA[100:N]) map(to: pC[200:N], pD[300:N])\
                       device(1)
    {
      #pragma omp parallel for schedule(static,1)
      for(int i = 0; i < N; i++)
        pA[i+100] = pC[i+200] + pD[i+300] + 1;
    }
    #pragma omp target update from(pA[100:N]) device(1)
    // CHECK: Succeeded in "update to"
    fail = 0;
    VERIFY(0, N, A[i], (double)(i+4));
    if (fail) {
      printf ("Test update to with offsets: Failed\n");
    } else {
      printf ("Test update to with offsets: Succeeded\n");
    }
  }
#endif
  return 0;
}
|
target_teams_distribute_parallel_for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for'}}
#pragma omp target teams distribute parallel for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for'}}
#pragma omp target teams distribute parallel for foo
void test_no_clause(void) {
  // Directive with no clauses must parse; a non-for statement after it must
  // be diagnosed.  The expected-* comments are clang -verify directives with
  // relative line offsets — do not insert lines between them and their target.
  int i;
#pragma omp target teams distribute parallel for
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{statement after '#pragma omp target teams distribute parallel for' must be a for loop}}
#pragma omp target teams distribute parallel for
  ++i;
}
void test_branch_protected_scope(void) {
  // Jumps into or out of the OpenMP region must be diagnosed; jumps wholly
  // inside the region (to L2) are legal.  expected-* comments are load-bearing
  // clang -verify directives.
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target teams distribute parallel for
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
void test_invalid_clause(void) {
  // Unknown trailing tokens after the directive must produce a warning, not
  // an error.  expected-* comments are load-bearing clang -verify directives.
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for' are ignored}}
#pragma omp target teams distribute parallel for foo bar
  for (i = 0; i < 16; ++i)
    ;
}
void test_non_identifiers(void) {
  // Stray punctuation (';', ',') after the directive or a clause must be
  // warned about and ignored.  expected-* comments are load-bearing clang
  // -verify directives.
  int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for' are ignored}}
#pragma omp target teams distribute parallel for;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for' are ignored}}
#pragma omp target teams distribute parallel for private(x);
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for' are ignored}}
#pragma omp target teams distribute parallel for, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo(void);
// Exercises 'collapse' clause parsing (missing/extra parens, non-constant
// and non-positive arguments) and the associated-loop-count semantics.
void test_collapse(void) {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute parallel for collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute parallel for collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for', but found only 1}}
#pragma omp target teams distribute parallel for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for', but found only 1}}
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute parallel for collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute parallel for collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{loop iteration variable in the associated loop of 'omp target teams distribute parallel for' directive may not be firstprivate, predetermined as private}}
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target teams distribute parallel for collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// Exercises 'private' clause parsing errors and valid variable lists.
void test_private(void) {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute parallel for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute parallel for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute parallel for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// Exercises 'lastprivate' clause parsing errors and valid variable lists.
void test_lastprivate(void) {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute parallel for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute parallel for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute parallel for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Exercises 'firstprivate' clause parsing errors and the semantic check
// that a variable may not be both lastprivate and firstprivate here.
void test_firstprivate(void) {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// The loop iteration variable must have integer or pointer type; float and
// double induction variables are rejected.
void test_loop_messages(void) {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute parallel for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute parallel for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
GB_binop__bshift_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint8)
// C=scalar+B GB (_bind1st__bshift_uint8)
// C=scalar+B' GB (_bind1st_tran__bshift_uint8)
// C=A+scalar GB (_bind2nd__bshift_uint8)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint8 (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_uint8 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT8 || GxB_NO_BSHIFT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The numeric
// kernel body is supplied by the included template, which expands the
// GB_* macros defined at the top of this (auto-generated) file.
GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, sliced by
// the precomputed B_ek_slicing task decomposition.
GrB_Info GB (_Cdense_accumB__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__bshift_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above always returns); kept as emitted by
// the code generator, since this file must not be edited by hand.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union pattern); the per-task work
// lists and C_to_* maps are computed by the symbolic phase.
GrB_Info GB (_AaddB__bshift_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection pattern).
GrB_Info GB (_AemultB_01__bshift_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// BSHIFT is non-commutative with no flipped variant, so GB_BINOP_FLIP is 1
// and the flipxy case is handled by instantiating the template twice.
GrB_Info GB (_AemultB_02__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__bshift_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand and apply the
// operator to every entry of B.  Cx and Bx may alias.
GrB_Info GB (_bind1st__bshift_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
uint8_t x = (*((uint8_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only entries present in the bitmap Bb are computed
if (GBB (Bb, k))
{
int8_t bij = GBX (Bx, k, false) ;
Cx [k] = GB_bitshift_uint8 (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand and apply the
// operator to every entry of A.  Cx and Ax may alias.
GrB_Info GB (_bind2nd__bshift_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in the bitmap Ab are computed
if (GBB (Ab, k))
{
uint8_t aij = GBX (Ax, k, false) ;
Cx [k] = GB_bitshift_uint8 (aij, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint8 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with scalar x bound
// as the first operand.  GB_ATYPE is temporarily redefined to the B-side
// type (int8_t) because A feeds the operator's second argument here, then
// restored after the template expansion.
GrB_Info GB (_bind1st_tran__bshift_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint8 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with scalar y bound
// as the second operand (uses the GB_CAST_OP defined just above).
GrB_Info GB (_bind2nd_tran__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
execute_test.c | /*
* _EXECUTE_TEST_C_
*
* HMC-SIM SPMV TEST EXECUTION FUNCTIONS
* FOR THE FINITE STATE MACHINE
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "spmv.h"
#include "hmc_sim.h"
/* ------------------------------------------ ZERO_PACKET */
/*
* ZERO_PACKET
*
*/
static void zero_packet( uint64_t *packet )
{
/* clear every quadword of the request packet buffer */
uint64_t idx;
for( idx = 0; idx < HMC_MAX_UQ_PACKET; idx++ ){
packet[idx] = 0x00ll;
}
return ;
}
/* ------------------------------------------ EXECUTE_TEST */
/*
* executes a sparse matrix vector multiply
* using a compressed sparse row format
*
* #pragma omp parallel shared( mat, product )
* {
* for( i=0; i<nrows; i++ ){
*
* #pragma omp for nowait
* for( j=rows[i]; j<rows[i+1]; j++ ){
* tmp = cols[j];
* products[i] += vals[j] * x_vect[tmp];
* }
*
* __amo_add_u8( &product, products[i] );
* }
* }
*/
/*
 * EXECUTE_TEST
 *
 * Drives the SPMV test: sets up hmc-sim tracing, then clocks the simulator
 * while draining responses from every link until all threads are done.
 * Returns 0 on success, -1 on allocation or file-open failure.
 *
 * NOTE(review): mat, num_threads (beyond sizing), simd, nzero and ncols are
 * accepted but not yet consumed by the compute skeleton below -- confirm
 * against the intended SPMV algorithm in the file header comment.
 */
extern int execute_test( struct hmcsim_t *hmc,
struct csr_t *mat,
uint32_t num_threads,
uint32_t simd,
int nzero,
int nrows,
int ncols )
{
/* vars */
uint64_t packet[HMC_MAX_UQ_PACKET];
uint64_t head = 0x00ll;
uint64_t tail = 0x00ll;
uint64_t payload[8] = {0x00ll, 0x00ll, 0x00ll, 0x00ll,
0x00ll, 0x00ll, 0x00ll, 0x00ll };
uint8_t cub = 0;
uint16_t tag = 1;
uint8_t link = 0;
int ret = 0;
int rtn = -1; /* function return value; set to 0 only on success */
FILE *ofile = NULL;
uint32_t done = 0;
uint64_t outer = 0x00ll;
uint32_t i = 0;
uint64_t niter = 0x00ll;
uint64_t *start = NULL; /* starting point of each thread */
uint64_t *end = NULL; /* ending point of each thread */
uint64_t *cur = NULL; /* current index of each thread */
uint64_t *count = NULL; /* completed requests per thread */
uint64_t *status = NULL; /* status signals for each thread */
uint64_t *scalar = NULL; /* status signals for scalars */
/* ---- */
/* allocate the thread-local memory constructs */
cur = malloc( sizeof( uint64_t ) * num_threads );
count = malloc( sizeof( uint64_t ) * num_threads );
start = malloc( sizeof( uint64_t ) * num_threads );
end = malloc( sizeof( uint64_t ) * num_threads );
status = malloc( sizeof( uint64_t ) * num_threads );
scalar = malloc( sizeof( uint64_t ) * num_threads );
/* BUG FIX: the original never checked these allocations before use */
if( (cur == NULL) || (count == NULL) || (start == NULL) ||
(end == NULL) || (status == NULL) || (scalar == NULL) ){
printf( "FAILED : COULD NOT ALLOCATE THREAD-LOCAL STATE\n" );
goto complete_failure;
}
/* init all the data */
/* setup the hmc-sim tracing mechansims */
ofile = fopen( "spmv.out", "w" );
if( ofile == NULL ){
printf( "FAILED : COULD NOT OPEN OUPUT FILE stream.out\n" );
/* BUG FIX: the original leaked all six thread arrays on this path */
goto complete_failure;
}
hmcsim_trace_handle( hmc, ofile );
hmcsim_trace_level( hmc, (HMC_TRACE_BANK|
HMC_TRACE_QUEUE|
HMC_TRACE_CMD|
HMC_TRACE_STALL|
HMC_TRACE_LATENCY) );
printf( "SUCCESS : INITIALIZED TRACE HANDLERS\n" );
/*
 * zero the packet
 *
 */
zero_packet( &(packet[0]) );
printf( "BEGINNING EXECUTION\n" );
/*
 * we implicitly loop around the number of rows
 * all threads execute this portion of the algorithm
 * in parallel, synchronously
 *
 */
for( outer=0; outer<nrows; outer++ ){
/*
 * the inner loop is explicitly parallelized
 * across the number of available threads.
 * this is the core of our computational loop
 *
 */
/* setup the necessary control */
/*
 * NOTE(review): `done` is never incremented anywhere in this loop, so
 * the while condition below can never become false once entered with
 * num_threads > 0 -- the per-thread body is an unfinished skeleton.
 * Confirm the intended completion signalling before enabling this path.
 */
while( done < num_threads ){
for( i=0; i<num_threads; i++ ){
} /* end for loop: i=0; i<num_threads; i++ */
/*
 * drain all the responses off all the links
 *
 */
for( i=0; i<hmc->num_links; i++ ){
ret = HMC_OK;
while( ret != HMC_STALL ){
ret = hmcsim_recv( hmc, cub, i, &(packet[0]) );
}
}
/*
 * clock the sim
 *
 */
hmcsim_clock( hmc );
}/* end while loop: done < num_threads */
} /* end for loop: outer=0; outer<nrows; outer++ */
printf( "SUCCESS : EXECUTION COMPLETE\n" );
rtn = 0;
complete_failure:
/* single cleanup point shared by the success and failure paths */
if( ofile != NULL ){
fclose( ofile );
ofile = NULL;
}
free( cur );
free( count );
free( start );
free( end );
free( status );
free( scalar );
return rtn;
}
/* EOF */
|
flowinfo_ipv4_dst.c | /*
* Copyright 2014-2016 Nippon Telegraph and Telephone Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file flowinfo_ipv4_dst.c
* @brief Optimized flow database for dataplane, for ipv4_dst
*/
#include <stdlib.h>
#include "openflow.h"
#include "lagopus_apis.h"
#include "lagopus/flowdb.h"
#include "pktbuf.h"
#include "packet.h"
#include "lagopus/flowinfo.h"
#define OXM_FIELD_TYPE(field) ((field) >> 1)
#define IPV4_DST_BITLEN (32)
static lagopus_result_t
add_flow_ipv4_dst_mask(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_ipv4_dst_mask(struct flowinfo *, struct flow *);
static struct flow *
match_flow_ipv4_dst_mask(struct flowinfo *, struct lagopus_packet *,
int32_t *);
static struct flow *
find_flow_ipv4_dst_mask(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_ipv4_dst_mask(struct flowinfo *);
static lagopus_result_t
add_flow_ipv4_dst(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_ipv4_dst(struct flowinfo *, struct flow *);
static struct flow *
match_flow_ipv4_dst(struct flowinfo *, struct lagopus_packet *, int32_t *);
static struct flow *
find_flow_ipv4_dst(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_ipv4_dst(struct flowinfo *);
/*
 * Extract the IPV4_DST value and mask from a flow's match list.
 * A masked OXM entry (hasmask bit set, i.e. oxm_field == (type << 1) + 1)
 * carries the 4-byte mask immediately after the 4-byte address in
 * oxm_value; an unmasked entry yields an exact match (mask 0xffffffff).
 * Returns LAGOPUS_RESULT_OK on success, LAGOPUS_RESULT_NOT_FOUND when the
 * list contains no IPV4_DST match (TAILQ_FOREACH leaves match == NULL).
 */
static lagopus_result_t
get_match_ipv4_dst(const struct match_list *match_list,
uint32_t *ipv4_dst,
uint32_t *mask) {
const struct match *match;
TAILQ_FOREACH(match, match_list, entry) {
if (match->oxm_field == (OFPXMT_OFB_IPV4_DST << 1) + 1) {
/* masked form: value then mask, each 4 bytes */
OS_MEMCPY(ipv4_dst, match->oxm_value, sizeof(*ipv4_dst));
OS_MEMCPY(mask, &match->oxm_value[4], sizeof(*mask));
break;
}
if (OXM_FIELD_TYPE(match->oxm_field) == OFPXMT_OFB_IPV4_DST) {
/* unmasked form: exact match on all 32 bits */
OS_MEMCPY(ipv4_dst, match->oxm_value, sizeof(*ipv4_dst));
*mask = 0xffffffff;
break;
}
}
if (match == NULL) {
return LAGOPUS_RESULT_NOT_FOUND;
}
return LAGOPUS_RESULT_OK;
}
/*
 * Allocate the masked-IPV4_DST flowinfo node.  One child flowinfo is kept
 * per distinct mask value in the `next` array; flows without an IPV4_DST
 * match fall through to `misc`.  Returns NULL if calloc fails.
 */
struct flowinfo *
new_flowinfo_ipv4_dst_mask(void) {
struct flowinfo *self;
self = calloc(1, sizeof(struct flowinfo));
if (self != NULL) {
self->nflow = 0;
self->nnext = 0;
/* minimal seed allocation so add_flow can grow it with realloc */
self->next = malloc(1);
self->misc = new_flowinfo_ipv4_src_mask();
/* NOTE(review): malloc/new_flowinfo_ipv4_src_mask results are not
 * checked here -- confirm callers tolerate partially-built nodes. */
self->add_func = add_flow_ipv4_dst_mask;
self->del_func = del_flow_ipv4_dst_mask;
self->match_func = match_flow_ipv4_dst_mask;
self->find_func = find_flow_ipv4_dst_mask;
self->destroy_func = destroy_flowinfo_ipv4_dst_mask;
}
return self;
}
/*
 * Destroy a masked-IPV4_DST flowinfo node: recursively destroy each
 * per-mask child, the child pointer array, the misc sub-flowinfo, and
 * finally the node itself.
 */
static void
destroy_flowinfo_ipv4_dst_mask(struct flowinfo *self) {
struct flowinfo *flowinfo;
unsigned int i;
for (i = 0; i < self->nnext; i++) {
flowinfo = self->next[i];
flowinfo->destroy_func(flowinfo);
}
free(self->next);
/* BUG FIX: self->misc is allocated by new_flowinfo_ipv4_dst_mask() but
 * was never released here, leaking the entire misc sub-tree. */
if (self->misc != NULL) {
self->misc->destroy_func(self->misc);
}
free(self);
}
/* hashmap value destructor: dispatch to the stored flowinfo's own
 * destroy_func. */
static void
freeup_flowinfo(void *val) {
struct flowinfo *fi = (struct flowinfo *)val;
fi->destroy_func(fi);
}
/*
 * Allocate an exact-match IPV4_DST flowinfo node backed by a one-word-key
 * hashmap; stored values are freed via freeup_flowinfo on destroy.
 * Returns NULL if calloc fails.
 */
struct flowinfo *
new_flowinfo_ipv4_dst(void) {
struct flowinfo *self;
self = calloc(1, sizeof(struct flowinfo));
if (self != NULL) {
/* NOTE(review): lagopus_hashmap_create's result is not checked --
 * confirm a failed create is tolerated downstream. */
lagopus_hashmap_create(&self->hashmap, LAGOPUS_HASHMAP_TYPE_ONE_WORD,
freeup_flowinfo);
/* misc is not used */
self->add_func = add_flow_ipv4_dst;
self->del_func = del_flow_ipv4_dst;
self->match_func = match_flow_ipv4_dst;
self->find_func = find_flow_ipv4_dst;
self->destroy_func = destroy_flowinfo_ipv4_dst;
}
return self;
}
/* Destroy an exact-match node: the hashmap destructor (freeup_flowinfo)
 * releases every stored flowinfo, then the node itself is freed. */
static void
destroy_flowinfo_ipv4_dst(struct flowinfo *self) {
lagopus_hashmap_destroy(&self->hashmap, true);
free(self);
}
static lagopus_result_t
add_flow_ipv4_dst_mask(struct flowinfo *self, struct flow *flow) {
struct flowinfo *flowinfo;
uint32_t ipv4_dst, mask;
lagopus_result_t rv;
unsigned int i;
rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
if (rv == LAGOPUS_RESULT_OK) {
rv = LAGOPUS_RESULT_NOT_FOUND;
for (i = 0; i < self->nnext; i++) {
if (self->next[i]->userdata == mask) {
flowinfo = self->next[i];
rv = LAGOPUS_RESULT_OK;
break;
}
}
if (rv == LAGOPUS_RESULT_NOT_FOUND) {
/* new node. */
flowinfo = new_flowinfo_ipv4_dst();
flowinfo->userdata = mask;
self->next = realloc(self->next,
(unsigned long)(self->nnext + 1) *
sizeof(struct flowinfo *));
self->next[self->nnext] = flowinfo;
self->nnext++;
}
rv = flowinfo->add_func(flowinfo, flow);
} else {
rv = self->misc->add_func(self->misc, flow);
}
if (rv == LAGOPUS_RESULT_OK) {
self->nflow++;
}
return rv;
}
/*
 * Delete a flow from the masked-IPV4_DST node.  Flows with an IPV4_DST
 * match are removed from the child for their mask value; an emptied child
 * is destroyed and compacted out of the `next` array.  Flows without the
 * match are delegated to the misc sub-flowinfo.
 */
static lagopus_result_t
del_flow_ipv4_dst_mask(struct flowinfo *self, struct flow *flow) {
struct flowinfo *flowinfo;
uint32_t ipv4_dst, mask;
lagopus_result_t rv;
unsigned int i;
rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
if (rv == LAGOPUS_RESULT_OK) {
rv = LAGOPUS_RESULT_NOT_FOUND;
for (i = 0; i < self->nnext; i++) {
if (self->next[i]->userdata == mask) {
flowinfo = self->next[i];
rv = LAGOPUS_RESULT_OK;
break;
}
}
if (rv == LAGOPUS_RESULT_NOT_FOUND) {
return LAGOPUS_RESULT_NOT_FOUND;
}
rv = flowinfo->del_func(flowinfo, flow);
if (flowinfo->nflow == 0) {
flowinfo->destroy_func(flowinfo);
self->nnext--;
/* BUG FIX: memmove takes a byte count; the original omitted the
 * element size, moving only (nnext - i) BYTES of a pointer array
 * and corrupting the remaining child pointers on deletion. */
memmove(&self->next[i], &self->next[i + 1],
(size_t)(self->nnext - i) * sizeof(struct flowinfo *));
}
} else {
rv = self->misc->del_func(self->misc, flow);
}
if (rv == LAGOPUS_RESULT_OK) {
self->nflow--;
}
return rv;
}
/*
 * Match a packet against every per-mask child node and keep the
 * highest-priority hit (priority must exceed 0, matching the previous
 * sentinel flow whose priority was 0).  The misc node is then queried and,
 * as before, overrides the result whenever it returns non-NULL.
 * Returns NULL when nothing matches.
 *
 * Fix: the previous revision declared a VLA of self->nnext candidate
 * pointers, which is undefined behavior when nnext == 0; the array was
 * also unnecessary — each candidate can be folded in as it is produced
 * (same match_func call order, comparisons are pure).
 */
static struct flow *
match_flow_ipv4_dst_mask(struct flowinfo *self, struct lagopus_packet *pkt,
                         int32_t *pri) {
  struct flow *matched, *candidate;
  unsigned int i;

  matched = NULL;
  //#pragma omp parallel for
  for (i = 0; i < self->nnext; i++) {
    struct flowinfo *flowinfo = self->next[i];

    candidate = flowinfo->match_func(flowinfo, pkt, pri);
    if (candidate != NULL &&
        candidate->priority > (matched != NULL ? matched->priority : 0)) {
      matched = candidate;
    }
  }
  candidate = self->misc->match_func(self->misc, pkt, pri);
  if (candidate != NULL) {
    matched = candidate;
  }
  return matched;
}
/*
 * Locate the stored flow equal to *flow.  Flows carrying an ipv4_dst
 * match are searched in the child node registered for their mask; flows
 * without that match are searched in the misc node.  Returns NULL when
 * no child node exists for the flow's mask.
 */
static struct flow *
find_flow_ipv4_dst_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *target = NULL;
  uint32_t ipv4_dst, mask;
  unsigned int i;

  if (get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask) !=
      LAGOPUS_RESULT_OK) {
    return self->misc->find_func(self->misc, flow);
  }
  for (i = 0; i < self->nnext; i++) {
    if (self->next[i]->userdata == mask) {
      target = self->next[i];
      break;
    }
  }
  if (target == NULL) {
    return NULL;
  }
  return target->find_func(target, flow);
}
static lagopus_result_t
add_flow_ipv4_dst(struct flowinfo *self, struct flow *flow) {
struct flowinfo *flowinfo;
uint32_t ipv4_dst, mask;
lagopus_result_t rv;
rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
if (rv == LAGOPUS_RESULT_OK) {
rv = lagopus_hashmap_find_no_lock(&self->hashmap,
(void *)ipv4_dst, (void *)&flowinfo);
if (rv != LAGOPUS_RESULT_OK) {
void *val;
flowinfo = new_flowinfo_ipv4();
val = flowinfo;
lagopus_hashmap_add_no_lock(&self->hashmap, (void *)ipv4_dst,
(void *)&val, false);
}
rv = flowinfo->add_func(flowinfo, flow);
if (rv == LAGOPUS_RESULT_OK) {
self->nflow++;
}
}
return rv;
}
/*
 * Delete a flow from the exact-address node.  The per-address child is
 * looked up by the flow's ipv4_dst value and the deletion is forwarded
 * to it; self->nflow is decremented whenever the child was found.
 * NOTE(review): the child's del_func result is discarded and nflow is
 * decremented regardless of it — confirm this mirrors callers' intent.
 */
static lagopus_result_t
del_flow_ipv4_dst(struct flowinfo *self, struct flow *flow) {
  uint32_t ipv4_dst, mask;
  lagopus_result_t rv;

  rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
  if (rv != LAGOPUS_RESULT_OK) {
    return rv;
  }
  struct flowinfo *child;
  rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)ipv4_dst,
                                    (void *)&child);
  if (rv == LAGOPUS_RESULT_OK) {
    child->del_func(child, flow);
    self->nflow--;
  }
  return rv;
}
/*
 * Match a packet against the exact-address node.  The packet's IPv4
 * destination is masked with self->userdata (set to this node's mask by
 * add_flow_ipv4_dst_mask) and used as the hashmap key; on a hit, the
 * per-address child performs the actual flow match.  Returns NULL when
 * no child exists for the masked address.
 * NOTE(review): assumes pkt->ipv4 is valid here — presumably guaranteed
 * by the classifier before this table is consulted; confirm at callers.
 */
static struct flow *
match_flow_ipv4_dst(struct flowinfo *self, struct lagopus_packet *pkt,
int32_t *pri) {
struct flowinfo *flowinfo;
uint32_t ipv4_dst;
struct flow *flow;
lagopus_result_t rv;
flow = NULL;
/* Mask the destination so it matches the key used at insertion time. */
ipv4_dst = (pkt->ipv4->ip_dst.s_addr & (uint32_t)self->userdata);
rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)ipv4_dst,
(void *)&flowinfo);
if (rv == LAGOPUS_RESULT_OK) {
flow = flowinfo->match_func(flowinfo, pkt, pri);
}
return flow;
}
/*
 * Find the stored flow equal to *flow in the exact-address node.  Flows
 * without an ipv4_dst match field are searched in the misc node instead;
 * returns NULL when no child node exists for the flow's address.
 */
static struct flow *
find_flow_ipv4_dst(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *child;
  uint32_t ipv4_dst, mask;

  if (get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask) !=
      LAGOPUS_RESULT_OK) {
    return self->misc->find_func(self->misc, flow);
  }
  if (lagopus_hashmap_find_no_lock(&self->hashmap, (void *)ipv4_dst,
                                   (void *)&child) != LAGOPUS_RESULT_OK) {
    return NULL;
  }
  return child->find_func(child, flow);
}
|
convolution_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Plain (non-vectorized) 5x5 stride-1 convolution.
// top_blob is pre-filled with the per-channel bias and accumulated into
// across input channels.  Two output rows are produced per iteration where
// possible, sharing input rows 1..4 between the two accumulators.  The
// multiply-adds run in the same order as the original unrolled code, so
// float results are bit-identical.
static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);
            const float* kernel0 = kernel + p * inch * 25 + q * 25;

            // Six consecutive input rows feed two output rows.
            const float* row[6];
            for (int t = 0; t < 6; t++)
                row[t] = img0 + w * t;

            // Five kernel rows of five taps each.
            const float* krow[5];
            for (int t = 0; t < 5; t++)
                krow[t] = kernel0 + 5 * t;

            int i = 0;

            // Two output rows per iteration.
            for (; i + 1 < outh; i += 2)
            {
                for (int remain = outw; remain > 0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    // Output row i reads input rows 0..4; row i+1 reads
                    // rows 1..5 against the same kernel rows.
                    for (int kr = 0; kr < 5; kr++)
                        for (int kc = 0; kc < 5; kc++)
                            sum += row[kr][kc] * krow[kr][kc];

                    for (int kr = 0; kr < 5; kr++)
                        for (int kc = 0; kc < 5; kc++)
                            sum2 += row[kr + 1][kc] * krow[kr][kc];

                    *outptr += sum;
                    *outptr2 += sum2;

                    for (int t = 0; t < 6; t++)
                        row[t]++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 4-column right margin plus one full input row
                // (already consumed as the second output row).
                for (int t = 0; t < 6; t++)
                    row[t] += 4 + w;
                outptr += outw;
                outptr2 += outw;
            }

            // Trailing single output row, if outh is odd.
            for (; i < outh; i++)
            {
                for (int remain = outw; remain > 0; remain--)
                {
                    float sum = 0;

                    for (int kr = 0; kr < 5; kr++)
                        for (int kc = 0; kc < 5; kc++)
                            sum += row[kr][kc] * krow[kr][kc];

                    *outptr += sum;

                    for (int t = 0; t < 5; t++)
                        row[t]++;
                    outptr++;
                }

                for (int t = 0; t < 5; t++)
                    row[t] += 4;
            }
        }
    }
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
mutable IdentifierInfo *Ident_abstract;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector"
/// and "bool" fast comparison. Only present if AltiVec or ZVector are
/// enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
IdentifierInfo *Ident_Bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
void MaybeDestroyTemplateIds() {
if (!TemplateIds.empty() &&
(Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
Parser &Self;
DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might hve been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
///\ brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - Peek ahead N tokens without consuming any of them.
/// N == 0 yields the current token 'Tok', N == 1 the token after it, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  if (N != 0 && !Tok.is(tok::eof))
    return PP.LookAhead(N - 1);
  return Tok;
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it. Equivalent to GetLookAheadToken(1).
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
/// A null annotation value is mapped to TypeError().
static TypeResult getTypeAnnotation(const Token &Tok) {
  void *AnnotVal = Tok.getAnnotationValue();
  if (AnnotVal == nullptr)
    return TypeError();
  return ParsedType::getFromOpaquePtr(AnnotVal);
}
private:
/// Store a parsed type into an annotation token. An invalid TypeResult is
/// represented as a null annotation value; a valid result must carry a
/// non-null type.
static void setTypeAnnotation(Token &Tok, TypeResult T) {
assert((T.isInvalid() || T.get()) &&
"produced a valid-but-null type annotation?");
Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
/// Read a NamedDecl out of an annotation token.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
/// Store a NamedDecl into an annotation token.
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an IdentifierInfo out of an annotation token.
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token. The value round-trips through the opaque pointer stored by
/// setExprAnnotation below.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token, stored as an opaque pointer (see getExprAnnotation).
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
/// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
/// find a type name by attempting typo correction.
/// NOTE(review): no NeedType parameter appears in the declaration below;
/// this comment looks stale -- verify against the definition.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
/// Returns true if the current token could plausibly start a C++
/// nested-name-specifier.
bool MightBeCXXScopeToken() {
  if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
                  tok::kw___super))
    return true;
  // An already-annotated template-id can begin a scope only if '::' follows.
  return Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon);
}
/// Annotate a scope token only when the current token might begin a
/// nested-name-specifier; returns the result of TryAnnotateCXXScopeToken
/// when annotation is attempted, false otherwise.
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
/// AnnotatedNameKind - The result of an attempt to annotate a name token
/// (see TryAnnotateName below).
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens
/// ("vector", "bool"/"Bool", and -- in AltiVec mode -- "pixel"), replacing
/// them with the non-context-sensitive keywords. This returns true if the
/// token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  IdentifierInfo *II = Tok.getIdentifierInfo();
  bool IsContextSensitive =
      II == Ident_vector || II == Ident_bool || II == Ident_Bool ||
      (getLangOpts().AltiVec && II == Ident_pixel);
  if (!IsContextSensitive)
    return false;

  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for the context-sensitive AltiVec "vector"
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  // Look up the IdentifierInfo lazily, the first time we are asked.
  if (Ident_instancetype == nullptr)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - A "tentative parsing transaction". Constructing
/// it records the current token position; afterwards exactly one of
/// Commit() (keep the consumed tokens) or Revert() (rewind to the recorded
/// position) must be called before destruction. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;

public:
  explicit TentativeParsingAction(Parser &p)
      : P(p), PrevPreferredType(p.PreferredType), PrevTok(p.Tok),
        PrevTentativelyDeclaredIdentifierCount(
            p.TentativelyDeclaredIdentifiers.size()),
        PrevParenCount(p.ParenCount), PrevBracketCount(p.BracketCount),
        PrevBraceCount(p.BraceCount), isActive(true) {
    // Mark this position so the preprocessor can replay tokens on Revert().
    P.PP.EnableBacktrackAtThisPos();
  }

  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }

  void Revert() {
    assert(isActive && "Parsing action was finished!");
    // Rewind the token stream first, then restore the parser-side state
    // that was captured in the constructor.
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }

  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
// Unconditionally rewind to the token position recorded at construction.
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An RAII object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
// Only notify Sema when we are actually inside an ObjC container;
// the destructor mirrors this with the re-enter callback.
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit, identifying the syntactic
/// position where the stray ';' appeared (see ConsumeExtraSemi).
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
/// Consumed by checkCompoundToken below.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - RAII object introducing a new scope for parsing. The kind
/// of scope is determined by ScopeFlags. Construct it on the stack at the
/// position where the parser enters the scope; the scope is exited again
/// when the object is destroyed (or when Exit() is called explicitly).
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // Construct a new object to manage a scope in the parser Self, created
  // with the flags ScopeFlags -- but only when we aren't about to enter a
  // compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt) {
      Self->EnterScope(ScopeFlags);
      return;
    }
    if (BeforeCompoundStmt)
      Self->incrementMSManglingNumber();
    // No scope was entered, so there is nothing to exit on destruction.
    this->Self = nullptr;
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// Introduces zero or more scopes for parsing. Every scope entered through
/// this object is exited again when it is destroyed (or on Exit()).
class MultiParseScope {
  Parser &Self;
  unsigned NumScopes = 0;

  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}

  /// Enter one additional scope with the given flags.
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }

  /// Exit every scope entered so far, in LIFO order.
  void Exit() {
    for (; NumScopes != 0; --NumScopes)
      Self.ExitScope();
  }

  ~MultiParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
/// (Constructor and destructor are defined out of line; the saved OldFlags
/// suggests the destructor restores them -- see the definition.)
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
/// Emit a diagnostic at the given source location.
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
/// Emit a diagnostic at the given token's location.
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit a diagnostic at the current token's location.
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions. The enumerators are bit flags
/// and may be combined with the operator| defined below.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
/// Combine skip-until flags while keeping the result typed as
/// SkipUntilFlags.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
/// Convenience overload: skip until either of two tokens is found.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
/// Convenience overload: skip until any of three tokens is found.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
/// Core implementation shared by the overloads above; defined out of line.
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have misleading indentation. If there is no
/// MisleadingIndentationChecker active on an else, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
// Subclasses override the kinds of late parsing they participate in
// (see LateParsedClass, LateParsedAttribute, LexedMethod, etc. below).
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
// The parsed class whose late-parsed members this node covers.
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
// The cached tokens that make up the attribute's arguments.
CachedTokens Toks;
IdentifierInfo &AttrName;
// NOTE(review): presumably the macro this attribute was expanded from,
// if any -- confirm against the code that sets it.
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
// Declarations the attribute will be applied to (registered via addDecl).
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
// Access specifier in effect where the pragma appeared.
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
// Take ownership of the cached pragma tokens (swaps with the argument).
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  /// Whether these attributes should be parsed shortly after creation
  /// rather than at the end of the class.
  // Fix: const-qualified -- the accessor only reads ParseSoon, and the
  // non-const version could not be called through a const reference.
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
// The method declaration whose body tokens are cached in Toks.
Decl *D;
CachedTokens Toks;
explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument. Owned by this object (unique_ptr).
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser *Self;
/// Method - The method declaration.
Decl *Method;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed. Null when there is no delayed
/// exception-specification.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
// See the comment above: delayed parsing of a non-static data member
// initializer (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities. (Holds owning pointers to LateParsedDeclaration nodes.)
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), IsInterface(IsInterface),
TagOrTemplate(TagOrTemplate) {}
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed; ClassStack must be
/// non-empty.
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition. Pushes a
/// parsing-class record on construction and pops it on destruction unless
/// Pop() was already called explicitly.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {}

  /// Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    if (!Popped)
      Pop();
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
// Default: not a template at all.
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
// A template declaration or explicit specialization.
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
// An explicit instantiation, possibly preceded by 'extern'.
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII record for parsing an Objective-C @implementation: registers itself
/// as the parser's current implementation context and collects methods
/// whose bodies are parsed late.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
/// Objective-C context-sensitive type qualifiers ('in', 'out', etc.).
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals // Sentinel: qualifier count; sizes the ObjCTypeQuals array.
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0, ///< Definitely not a type cast.
MaybeTypeCast,   ///< Might be a type cast; still ambiguous.
IsTypeCast       ///< Definitely a type cast.
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0, ///< Parse any cast-expression.
UnaryExprOnly,   ///< Restrict parsing to unary-expression forms.
PrimaryExprOnly  ///< Restrict parsing to primary-expression forms.
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseSYCLUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr,      ///< Only parse '(' expression ')'
FoldExpr,        ///< Also allow fold-expression <anything>
CompoundStmt,    ///< Also allow '(' compound-statement ')'
CompoundLiteral, ///< Also allow '(' type-name ')' '{' ... '}'
CastExpr         ///< Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
/// Returned/consumed via ParseLambdaIntroducer's 'Tentative' parameter.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again (non-tentatively) to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr,
bool EnterForConditionScope = false);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
// A braced initializer-list gets its own parse; anything else is an
// assignment-expression.
if (Tok.is(tok::l_brace))
return ParseBraceInitializer();
return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
/// Context handed to ParseInitializerWithPotentialDesignator, carrying
/// state for designated-initializer handling.
struct DesignatorCompletionInfo {
/// Initializer expressions accumulated so far.
SmallVectorImpl<Expr *> &InitExprs;
/// Presumably the type of the aggregate being initialized, used to
/// suggest designators -- TODO(review): confirm against callers.
QualType PreferredBaseType;
};
ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
SourceLocation *LParenLoc = nullptr,
SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
/// How to treat the body of an __if_exists/__if_not_exists block;
/// stored in IfExistsCondition::Behavior.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior this __if_exists or __if_not_exists block should
/// follow: parse, skip, or treat as dependent.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
// Interrogated by the static is*Context classifier helpers below.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
// Exhaustive switch (no default) so -Wswitch flags new enumerators.
switch (DSC) {
// Contexts that parse full declaration specifiers, not just a type.
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;

// Contexts restricted to a type-specifier(-seq).
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
/// Whether a defining-type-specifier is permitted in a given context;
/// returned by isDefiningTypeSpecifierContext.
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (eg, because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
// Exhaustive switch (no default) so -Wswitch flags new enumerators.
switch (DSC) {
// Contexts where a class/enum definition is grammatical and can be valid.
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
return AllowDefiningTypeSpec::Yes;
// Grammatical but always invalid; Sema rejects.
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
return AllowDefiningTypeSpec::YesButInvalid;
// Not grammatical, but parsed anyway for error recovery.
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
return AllowDefiningTypeSpec::NoButErrorRecovery;
// Not grammatical and must not be parsed.
case DeclSpecContext::DSC_trailing:
return AllowDefiningTypeSpec::No;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
// Exhaustive switch (no default) so -Wswitch flags new enumerators.
switch (DSC) {
// Declaration contexts where 'enum E : int;' can appear.
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
return true;
// Pure type contexts (and others) where it cannot.
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
// Exhaustive switch (no default) so -Wswitch flags new enumerators.
switch (DSC) {
// Contexts where class template argument deduction is permitted.
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
// Contexts where it is not.
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
/// Location of the ':' in 'for (decl : range)'.
SourceLocation ColonLoc;
/// The range expression following the ':'.
ExprResult RangeExpr;
// True iff a ':' was seen, i.e. a for-range-declaration was parsed.
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
/// Extends ForRangeInit with the loop-variable statement; passed to
/// ParseCXXCondition when a condition may be a for-range declaration.
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier that isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
// C++ needs tentative-parse disambiguation; C can decide directly.
return getLangOpts().CPlusPlus
? isCXXDeclarationSpecifier() == TPResult::True
: isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration and an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
// C has no declaration/expression ambiguity requiring tentative parsing.
if (!getLangOpts().CPlusPlus)
return isDeclarationSpecifier(true);
return isCXXDeclarationStatement();
}
/// isForInitDeclaration - Disambiguates between a declaration and an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
// Let Sema know a loop is starting before we disambiguate, for OpenMP.
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
return getLangOpts().CPlusPlus
? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
: isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,          ///< Potential type-id enclosed in parentheses.
TypeIdUnambiguous,       ///< Potential type-id not assumed parenthesized.
TypeIdAsTemplateArgument ///< Potential type-id in a template-argument slot.
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (!getLangOpts().CPlusPlus) {
// In C there is no ambiguity: a type-id is just specifier-qualifiers.
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
bool IgnoredAmbiguity;
return isTypeIdInParens(IgnoredAmbiguity);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
// C never needs the ambiguity flag; decide from specifier-qualifiers.
if (!getLangOpts().CPlusPlus)
return isTypeSpecifierQualifier();
bool DiscardedAmbiguity;
return isCXXTypeId(TypeIdUnambiguous, DiscardedAmbiguity);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
/// Result of isCXXConditionDeclarationOrInitStatement.
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
// Convenience overload that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool UnusedAmbiguity;
return isCXXTypeId(Context, UnusedAmbiguity);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True,      ///< The construct was positively disambiguated.
False,     ///< The construct was positively ruled out.
Ambiguous, ///< Still ambiguous; more tentative parsing is needed.
Error      ///< A parsing error was encountered.
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context = DeclaratorContext::TypeName,
AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed. The current token must be '['; we only act when
// it is followed by a second '[' (i.e. it looks like '[[').
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// If the upcoming tokens look like a misplaced C++11 attribute ('[[' or
/// 'alignas'), diagnose it and suggest moving it to \p CorrectLocation.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  // Only '[[' or the alignas keyword can begin such an attribute here.
  bool LooksLikeAttribute =
      (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
      Tok.is(tok::kw_alignas);
  if (LooksLikeAttribute)
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
/// Diagnose all attributes in \p Attrs as prohibited and drop them.
/// \param FixItLoc a possible correct location for the attributes, used in
/// the fix-it hint.
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// Diagnose all attributes in \p Attrs as prohibited and clear the list
/// (the view variant clears only the list, not the underlying pool).
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID,
bool DiagnoseEmptyAttrs = false);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Emit warnings for C++11 and C2x attributes that are in a position that
/// clang accepts as an extension.
void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
// Bitmask selecting which attribute syntaxes ParseAttributes /
// MaybeParseAttributes should accept; values may be OR'd together.
enum ParseAttrKindMask {
PAKM_GNU = 1 << 0,      // __attribute__((...))
PAKM_Declspec = 1 << 1, // __declspec(...)
PAKM_CXX11 = 1 << 2,    // [[...]]-style attributes
};
/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...)))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
/// [[]] __attribute__(()) int i; // OK
/// __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr);
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
}
/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
/// \returns true if any attribute introducer was seen and parsed.
bool MaybeParseAttributes(unsigned WhichAttrKinds,
                          ParsedAttributesWithRange &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  bool AtAttributeStart =
      Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
      (standardAttributesAllowed() && isCXX11AttributeSpecifier());
  if (!AtAttributeStart)
    return false;
  ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
  return true;
}
/// Variant of MaybeParseAttributes that drops source-range information.
/// \returns true if any attribute introducer was seen and parsed.
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  bool AtAttributeStart =
      Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
      (standardAttributesAllowed() && isCXX11AttributeSpecifier());
  if (!AtAttributeStart)
    return false;
  ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
  return true;
}
/// If the current token starts a GNU __attribute__ specifier, parse the
/// attributes and attach them to declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes ParsedAttrs(AttrFactory);
  SourceLocation EndLoc;
  ParseGNUAttributes(ParsedAttrs, &EndLoc, LateAttrs, &D);
  D.takeAttributes(ParsedAttrs, EndLoc);
}
/// Parses GNU-style attributes, if present, and returns them without source
/// range information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
///
/// \returns true if any attributes were parsed.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    // Parse into the ranged list, then move the attributes into \p Attrs,
    // dropping the range. The previous code parsed directly into Attrs and
    // then called takeAllFrom() on the still-empty ranged list, making that
    // object (and the takeAllFrom call) dead code with a misleading data
    // flow. This mirrors the ParseGNUAttributes non-range wrapper below.
    ParsedAttributesWithRange AttrsWithRange(AttrFactory);
    ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs);
    Attrs.takeAllFrom(AttrsWithRange);
    return true;
  }
  return false;
}
/// If the current token starts a GNU __attribute__ specifier, parse the
/// attributes (with range information) into \p Attrs.
/// \returns true if any attributes were parsed.
bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return false;
  ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
  return true;
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
void ParseGNUAttributes(ParsedAttributes &Attrs,
                        SourceLocation *EndLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr) {
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseGNUAttributes(RangedAttrs, EndLoc, LateAttrs, D);
  Attrs.takeAllFrom(RangedAttrs);
}
void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If a C++11 attribute-specifier-seq starts here, parse it and attach the
/// attributes to declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange Parsed(AttrFactory);
  SourceLocation End;
  ParseCXX11Attributes(Parsed, &End);
  D.takeAttributes(Parsed, End);
}
/// If a C++11 attribute-specifier-seq starts here, parse it into \p attrs,
/// discarding range information. \returns true if attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return false;
  ParsedAttributesWithRange Ranged(AttrFactory);
  ParseCXX11Attributes(Ranged, endLoc);
  attrs.takeAllFrom(Ranged);
  return true;
}
/// If a C++11 attribute-specifier-seq starts here, parse it into \p attrs
/// with range information. \returns true if attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return false;
  if (!isCXX11AttributeSpecifier(/*Disambiguate=*/false,
                                 OuterMightBeMessageSend))
    return false;
  ParseCXX11Attributes(attrs, endLoc);
  return true;
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// Parse Microsoft-style [...] attributes when the Microsoft extension is
/// enabled and the current token is '['.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  bool AtMSAttribute = getLangOpts().MicrosoftExt && Tok.is(tok::l_square);
  if (AtMSAttribute)
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parse __declspec(...) specifiers when the keyword is enabled and present.
/// \returns true if any were parsed.
bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (!getLangOpts().DeclSpecKeyword || Tok.isNot(tok::kw___declspec))
    return false;
  ParseMicrosoftDeclSpecs(Attrs, End);
  return true;
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
// Convenience overload: classify the current token as a virt-specifier.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
bool isClassCompatibleKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
// The parser whose scope stack is entered/exited.
Parser &P;
// The nested-name-specifier naming the scope to enter.
CXXScopeSpec &SS;
// True once Sema has successfully been told we entered the declarator scope;
// gates the matching ActOnCXXExitDeclaratorScope in the destructor.
bool EnteredScope;
// True once a parser Scope has been pushed; gates the matching ExitScope.
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
// Push a parser scope and notify Sema. Note the ordering: CreatedScope is
// set before EnterScope so the destructor balances the push even if Sema
// rejects the scope spec (ActOnCXXEnterDeclaratorScope returning nonzero
// leaves EnteredScope false).
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
// Unwind in reverse order: tell Sema we left the declarator scope (only if
// it was successfully entered), then pop the parser scope (if pushed).
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
// Flags describing which attribute syntaxes a declarator context accepts;
// passed to ParseTypeQualifierListOpt and combined with bitwise OR.
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,      ///< Accept __attribute__((...)).
AR_CXX11AttributesParsed = 1 << 2,    ///< Accept [[...]] attributes.
AR_DeclspecAttributesParsed = 1 << 3, ///< Accept __declspec(...).
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// Locations and name of one level of a nested namespace declarator
/// (e.g. each 'B' in 'namespace A::inline B::C'); consumed by
/// ParseInnerNamespace.
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// The parsed pieces of a single using-declarator within a
/// using-declaration.
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;

/// Reset all fields so the declarator can be reused for the next entity.
void clear() {
SS.clear();
Name.clear();
TypenameLoc = SourceLocation();
EllipsisLoc = SourceLocation();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &Attrs,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse 'omp [begin] assume[s]' directive.
void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
/// Parse clauses for '#pragma omp [begin] declare target'.
void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
OpenMPDirectiveKind EndDKind,
SourceLocation Loc);
/// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check the \p FoundKind against the \p ExpectedKind, if not issue an error
/// that the "end" matching the "begin" directive of kind \p BeginKind was not
/// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses the 'sizes' clause of a '#pragma omp tile' directive.
OMPClause *ParseOpenMPSizesClause();
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
/// Parses clause with an interop variable of kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
//
OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
// Dependency-modifier or tail expression, when the clause carries one.
Expr *DepModOrTailExpr = nullptr;
// Location of the ':' separating modifiers from the variable list.
SourceLocation ColonLoc;
// Location of the closing right parenthesis of the clause.
SourceLocation RLoc;
// Scope and name of a reduction/mapper identifier, if present.
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
// Modifiers parsed for a 'map' clause, with their locations (parallel
// vectors).
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc;
// Modifiers parsed for 'to'/'from' motion clauses, with their locations.
SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
MotionModifiers;
SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
// True when the map type was not written explicitly in the source.
bool IsMapTypeImplicit = false;
SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
// Holds the set of GNU extended-asm qualifiers ("asm volatile inline goto")
// seen on a statement, stored as a bitmask of AQ values.
class GNUAsmQualifiers {
  unsigned Qualifiers = AQ_unspecified;

public:
  // Bitmask values; each qualifier occupies a distinct bit so they compose.
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  // Printable name for a single qualifier (used in diagnostics).
  static const char *getQualifierName(AQ Qualifier);
  // Records Qualifier in the mask; NOTE(review): return value presumably
  // signals a duplicate qualifier -- confirm against the implementation.
  bool setAsmQualifier(AQ Qualifier);
  // In-class definitions are implicitly inline; the stray trailing
  // semicolons after the original bodies (empty declarations) are removed.
  bool isVolatile() const { return Qualifiers & AQ_volatile; }
  bool isInline() const { return Qualifiers & AQ_inline; }
  bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
GB_unop__identity_bool_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_int16)
// op(A') function: GB (_unop_tran__identity_bool_int16)
// C type: bool
// A type: int16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply of the IDENTITY operator: Cx [p] = (bool) Ax [p].
// This file is auto-generated (see header); only comments are added here.
GrB_Info GB (_unop_apply__identity_bool_int16)
(
bool *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this specialized kernel was disabled at compile time (GB_DISABLE above);
// the caller falls back to the generic apply
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is not bitmap: all anz values of Ax are read densely
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions whose bitmap bit is not set
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + typecast + apply. The entire loop body is
// code-generated by the GB_unop_transpose.c template, driven by the GB_*
// macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_bool_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// specialized kernel disabled at compile time (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
unionreduce.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_UNIONREDUCE_H_
#define SRC_SINGLENODE_UNIONREDUCE_H_
#include <algorithm>
// Element-wise union of two dense, bitvector-masked vectors into v3:
// positions present in only one input are copied through; positions present
// in both are combined with op_fp. The output mask bv3 is bv1 OR bv2.
template <typename Ta, typename Tb, typename Tc>
void union_dense(Ta* v1, int * bv1, int nnz, int num_ints, Tb * v2, int * bv2, Tc * v3, int * bv3,
                 void (*op_fp)(Ta, Tb, Tc*, void*), void* vsp)
{
#pragma omp parallel for
  for (int i = 0; i < nnz; i++) {
    const bool in1 = get_bitvector(i, bv1);
    const bool in2 = get_bitvector(i, bv2);
    if (in1) {
      if (in2) {
        // present in both inputs: reduce with the user operator
        op_fp(v1[i], v2[i], &(v3[i]), vsp);
      } else {
        v3[i] = v1[i];
      }
    } else if (in2) {
      v3[i] = v2[i];
    }
  }
  // The result mask is the word-wise OR of the two input masks.
#pragma omp parallel for
  for (int w = 0; w < num_ints; w++) {
    bv3[w] = bv1[w] | bv2[w];
  }
}
// Union-reduce a compressed (index-list) vector v1 into a dense,
// bitvector-masked vector v2: entries already present in v2 are combined
// with op_fp; absent entries are copied in and their presence bit is set.
template <typename Ta, typename Tb>
void union_compressed(Ta* v1, int* indices, int nnz, int capacity, int num_ints, Tb * v2, int * bv2,
void (*op_fp)(Ta, Tb, Tb*, void*), void* vsp)
{
//int * indices = reinterpret_cast<int*>(v1 + nnz);
// Over-decompose (16 partitions per thread) for load balancing.
int npartitions = omp_get_max_threads() * 16;
#pragma omp parallel for
for(int p = 0 ; p < npartitions ; p++)
{
int nz_per = (nnz + npartitions - 1) / npartitions;
int start_nnz = p * nz_per;
int end_nnz = (p+1) * nz_per;
if(end_nnz > nnz) end_nnz = nnz;
// Adjust
// Shift both partition boundaries forward so all entries whose indices
// share the same 32-index group (indices[k]/32, i.e. the same bitvector
// word) are processed by a single partition -- presumably so two threads
// never read-modify-write the same bv2 word concurrently; confirm against
// the set_bitvector implementation. A partition may end up empty.
if(start_nnz > 0)
{
while((start_nnz < nnz) && (indices[start_nnz]/32 == indices[start_nnz-1]/32)) start_nnz++;
}
while((end_nnz < nnz) && (indices[end_nnz]/32 == indices[end_nnz-1]/32)) end_nnz++;
for(int i = start_nnz ; i < end_nnz ; i++)
{
int idx = indices[i];
if(get_bitvector(idx, bv2))
{
//Tb tmp = v2[idx];
//op_fp(v1[i], tmp, &(v2[idx]), vsp);
// position already occupied: reduce in place with the user operator
op_fp(v1[i], v2[idx], &(v2[idx]), vsp);
}
else
{
// first contribution at this position: mark present and copy the value
set_bitvector(idx, bv2);
v2[idx] = v1[i];
}
}
}
}
#endif // SRC_SINGLENODE_UNIONREDUCE_H_
|
mkl_quantized_conv_ops.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#ifdef INTEL_MKL
namespace tensorflow {
// Returns the float increment represented by one quantized step of integer
// type T over [range_min, range_max], using a symmetric integer range
// (e.g. [-127, 127] for 8-bit instead of [-128, 127]).
template <class T>
float MklFloatForOneQuantizedLevel(float range_min, float range_max) {
  int64 highest = static_cast<int64>(Eigen::NumTraits<T>::highest());
  int64 lowest = static_cast<int64>(Eigen::NumTraits<T>::lowest());
  // Clip the most-negative level so the range is symmetric about zero.
  if (lowest < -highest) ++lowest;
  return (range_max - range_min) / (highest - lowest);
}
// Computes the output quantization range [*min_c, *max_c] for a product of
// quantized values: one step of the product type T3 corresponds to the
// product of one T1 step over [min_a, max_a] and one T2 step over
// [min_b, max_b].
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
                                           float min_b, float max_b,
                                           float* min_c, float* max_c) {
  const float c_float_for_one_quant_level =
      MklFloatForOneQuantizedLevel<T1>(min_a, max_a) *
      MklFloatForOneQuantizedLevel<T2>(min_b, max_b);
  const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
  const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
  *min_c = c_float_for_one_quant_level * c_lowest;
  *max_c = c_float_for_one_quant_level * c_highest;
}
// Per-channel variant: computes output quantization ranges for a product
// where the second operand has per-channel ranges (min_b_vector /
// max_b_vector). Writes one [min_c, max_c] pair per channel into the
// preallocated output tensors.
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
                                           const Tensor& min_b_vector,
                                           const Tensor& max_b_vector,
                                           Tensor** min_c_vector,
                                           Tensor** max_c_vector) {
  DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements());
  DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements());
  size_t n_channel = min_b_vector.NumElements();
  const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
  const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
  const float* min_b = min_b_vector.flat<float>().data();
  const float* max_b = max_b_vector.flat<float>().data();
  float* min_c = (*min_c_vector)->flat<float>().data();
  float* max_c = (*max_c_vector)->flat<float>().data();
  // Loop-invariant: depends only on min_a/max_a, so compute it once instead
  // of recomputing it for every channel inside the loop.
  const float a_float_for_one_quant_level =
      MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
#ifndef ENABLE_MKLDNN_THREADPOOL
#pragma omp parallel for
#endif  // !ENABLE_MKLDNN_THREADPOOL
  // TODO: Add eigen parallel_for
  for (size_t n = 0; n < n_channel; ++n) {
    const float b_float_for_one_quant_level =
        MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]);
    const float c_float_for_one_quant_level =
        a_float_for_one_quant_level * b_float_for_one_quant_level;
    min_c[n] = c_float_for_one_quant_level * c_lowest;
    max_c[n] = c_float_for_one_quant_level * c_highest;
  }
}
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
#define TINYEXR_X86_OR_X64_CPU 1
#else
#define TINYEXR_X86_OR_X64_CPU 0
#endif
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU
#define TINYEXR_LITTLE_ENDIAN 1
#else
#define TINYEXR_LITTLE_ENDIAN 0
#endif
// Use miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
// tile format image;
// not zero for only a single-part "normal" tiled file (according to spec.)
int tiled;
int long_name; // long name attribute
// deep image(EXR 2.0);
// for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
int non_image;
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
// Describes one tile of a tiled EXR image, including its position in the
// tile grid, its mip/rip level, and its decoded per-channel pixel data.
typedef struct _EXRTile {
int offset_x; // tile column index in the tile grid
int offset_y; // tile row index in the tile grid
int level_x; // mip/rip level in x
int level_y; // mip/rip level in y
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
EXRBox2i data_window;
EXRBox2i display_window;
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
// for a single-part file, agree with the version field bit 11
// for a multi-part file, it is consistent with the type of part
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
// name attribute required for multipart files;
// must be unique and non empty (according to spec.);
// use EXRSetNameAttr for setting value;
// max 255 character allowed - excluding terminating zero
char name[256];
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
struct _EXRImage* next_level; // NULL if scanline format or image is the last level.
int level_x; // x level index
int level_y; // y level index
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba` Result image format is: float x RGBA x
// width x height. Returns negative value and may set error string in `err` when
// there's an error When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#include <miniz/miniz.h>
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
// Hand the caller a heap-allocated copy of `msg`; the caller releases it
// with FreeEXRErrorMessage(). No-op when the caller passed NULL for `err`.
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (!err) {
    return;
  }
#ifdef _WIN32
  (*err) = _strdup(msg.c_str());
#else
  (*err) = strdup(msg.c_str());
#endif
}
// Hand the caller a heap-allocated copy of `msg`; the caller releases it
// with FreeEXRErrorMessage(). No-op when the caller passed NULL for `warn`.
static void SetWarningMessage(const std::string &msg, const char **warn) {
  if (!warn) {
    return;
  }
#ifdef _WIN32
  (*warn) = _strdup(msg.c_str());
#else
  (*warn) = strdup(msg.c_str());
#endif
}
// Byte length of the EXR version block at the head of a file.
static const int kEXRVersionSize = 8;
// Byte-wise copy of a 16-bit value; safe for unaligned pointers.
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 2; i++) {
    d[i] = s[i];
  }
}
// Byte-swap a 16-bit value in place; no-op on little-endian hosts
// (EXR files are little-endian on disk).
static void swap2(unsigned short *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char lo = b[0];
  b[0] = b[1];
  b[1] = lo;
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
// Byte-wise copy of a 32-bit int; safe for unaligned pointers.
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
// Byte-wise copy of a 32-bit unsigned int; safe for unaligned pointers.
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
// Byte-wise copy of a 32-bit float; avoids strict-aliasing issues and is
// safe for unaligned pointers.
static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// Byte-swap a 32-bit unsigned value in place; no-op on little-endian hosts.
static void swap4(unsigned int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t0 = b[0];
  unsigned char t1 = b[1];
  b[0] = b[3];
  b[1] = b[2];
  b[2] = t1;
  b[3] = t0;
#endif
}
// Byte-swap a 32-bit signed value in place; no-op on little-endian hosts.
static void swap4(int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t0 = b[0];
  unsigned char t1 = b[1];
  b[0] = b[3];
  b[1] = b[2];
  b[2] = t1;
  b[3] = t0;
#endif
}
// Byte-swap a 32-bit float in place (operating on its raw bytes);
// no-op on little-endian hosts.
static void swap4(float *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t0 = b[0];
  unsigned char t1 = b[1];
  b[0] = b[3];
  b[1] = b[2];
  b[2] = t1;
  b[3] = t0;
#endif
}
#if 0
// Currently unused (compiled out); byte-wise 64-bit copy kept for reference.
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
  dst[4] = src[4];
  dst[5] = src[5];
  dst[6] = src[6];
  dst[7] = src[7];
}
#endif
// Byte-swap a 64-bit value in place; no-op on little-endian hosts.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  for (int i = 0; i < 4; i++) {
    unsigned char t = b[i];
    b[i] = b[7 - i];
    b[7 - i] = t;
  }
#endif
}
// https://gist.github.com/rygorous/2156668
// Bit-level view of an IEEE-754 single-precision float, used by the
// half<->float conversion helpers below. Bitfield order follows host
// endianness so `u` and `s` describe the same 32 bits.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// Bit-level view of an IEEE-754 half-precision (binary16) value, used by
// the half<->float conversion helpers below.
union FP16 {
  unsigned short u;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Convert an IEEE half (binary16) to single-precision float via bit
// manipulation (technique from the rygorous gist referenced above).
// Handles Inf/NaN and zero/denormal inputs in the special-case branches.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}
// Convert a single-precision float to IEEE half (binary16) with
// round-to-nearest behavior; overflow saturates to signed infinity and
// NaN inputs become quiet NaNs.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Read a NUL-terminated string from `ptr`, scanning at most `len` bytes.
// On success stores the string (without the terminator) in *s and returns
// the position just past the '\0'. If no terminator is found within `len`
// bytes, clears *s and returns NULL.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  size_t n = 0;
  while (n < len && ptr[n] != '\0') {
    n++;
  }
  if (n >= len) {
    // No terminator inside the allowed range.
    (*s).clear();
    return NULL;
  }
  (*s) = std::string(ptr, ptr + n);
  return ptr + n + 1;  // skip '\0'
}
// Parse one header attribute at `marker` (at most `size` bytes).
// Wire layout: name '\0' type '\0' uint32 data_len, then data_len payload
// bytes. On success fills name/type/data, stores total bytes consumed in
// *marker_size, and returns true. Returns false on truncated/malformed
// input. A zero-length payload is accepted only for "string" attributes.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);
      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
      // Store a lone NUL so data->at(0) is valid for callers.
      data->resize(1);
      (*data)[0] = '\0';
      return true;
    } else {
      return false;
    }
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}
// Append one serialized attribute to `out`.
// Wire layout: name '\0' type '\0' int32 length (file byte order), payload.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int stored_len = len;
  tinyexr::swap4(&stored_len);  // convert to file byte order on big-endian hosts
  const unsigned char *len_bytes =
      reinterpret_cast<const unsigned char *>(&stored_len);
  out->insert(out->end(), len_bytes, len_bytes + sizeof(int));

  out->insert(out->end(), data, data + len);
}
// Per-channel metadata, parsed from or written to the EXR `channels`
// attribute.
typedef struct {
  std::string name;          // less than 255 bytes long
  int pixel_type;            // pixel type as stored in the file
  int requested_pixel_type;  // pixel type used when writing (see WriteChannelInfo)
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];  // keeps the struct size a multiple of 4
} ChannelInfo;
// Integer bounding box matching the EXR `box2i` attribute layout
// (used for data_window / display_window).
typedef struct {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} Box2iInfo;
// Parsed contents of one EXR part header, plus bookkeeping fields used
// while reading/writing (header_len, compression_type).
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  Box2iInfo data_window;
  int line_order;
  Box2iInfo display_window;
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tiled;  // Non-zero if the part is tiled.
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  // required for multi-part or non-image files
  std::string name;
  // required for multi-part or non-image files
  std::string type;

  // Reset every field to its zero/empty default.
  void clear() {
    channels.clear();
    attributes.clear();

    data_window.min_x = 0;
    data_window.min_y = 0;
    data_window.max_x = 0;
    data_window.max_y = 0;
    line_order = 0;
    display_window.min_x = 0;
    display_window.min_y = 0;
    display_window.max_x = 0;
    display_window.max_y = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tiled = 0;
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;

    name.clear();
    type.clear();
  }
};
// Parse the `channels` attribute payload into `channels`.
// The payload is a sequence of records — name '\0', int pixel_type,
// uchar p_linear, uchar[3] reserved, int x_sampling, int y_sampling —
// terminated by a lone '\0'. Returns false on truncated/malformed data.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      // End-of-list terminator.
      break;
    }
    ChannelInfo info;

    // Remaining bytes from p to the end of the buffer.
    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }

    // Each record carries 16 more bytes after the name; they — and the byte
    // read on the next loop iteration — must lie inside the buffer.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));          // int
    p += 4;

    // Fix byte order on big-endian hosts.
    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);

    channels.push_back(info);
  }

  return true;
}
// Serialize `channels` into the wire format of the `channels` attribute
// ('\0'-terminated record list; layout mirrors ReadChannelInfo).
// Note: writes requested_pixel_type, i.e. the type the data is saved as.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += channels[c].name.length() + 1;  // +1 for \0
    sz += 16;                             // 4 * int
  }
  data.resize(sz + 1);  // +1 for the terminating '\0'

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), channels[c].name.length());
    p += channels[c].name.length();
    (*p) = '\0';
    p++;

    // Swap to file byte order on big-endian hosts.
    int pixel_type = channels[c].requested_pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;  // p_linear (1 byte) + reserved uchar[3]

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';
}
// ZIP-compress one chunk of scanline data into `dst`.
// Applies OpenEXR's byte reordering + delta predictor before deflating
// (see ImfZipCompressor.cpp). If the compressed result is not smaller
// than the input, the raw input is stored instead and
// compressedSize == src_size (Issue 40).
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data: even-indexed bytes to the first half of
  // tmpBuf, odd-indexed bytes to the second half.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor: store (biased) deltas between adjacent bytes so similar
  // neighboring values deflate better.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //
  mz_ulong outSize = mz_compressBound(src_size);
  int ret = mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  (void)ret;  // silence unused-variable warning in NDEBUG builds

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inflate one ZIP-compressed chunk into `dst`, then undo the delta
// predictor and byte reordering (inverse of CompressZip).
// *uncompressed_size is the expected output size on entry and the actual
// size written on return. When src_size equals *uncompressed_size the
// data was stored raw and is copied verbatim (Issue 40).
// Returns false when inflation fails.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor: reconstruct original bytes from the stored deltas.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data: interleave the two halves back together.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// RLE tuning: runs shorter than MIN_RUN_LENGTH are emitted verbatim; run
// counts are stored in a signed char, hence the 127 cap.
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
  // Output format: a count byte c followed by data. c >= 0 means the next
  // byte repeats c+1 times; c < 0 means -c literal bytes follow.
  // Worst case output is (inLength * 3) / 2 bytes (see CompressRle).
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    // Extend the run while bytes repeat, capped at MAX_RUN_LENGTH.
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //
      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run: gather literals until a run of >= 3 equal
      // bytes begins (or the length cap is hit).
      //
      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  // Inverse of rleCompress: negative count -> copy -count literal bytes,
  // non-negative count -> repeat the next byte count+1 times.
  // Returns 0 when output would exceed maxLength or input is truncated.
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);

      out += count + 1;
      in++;
    }
  }

  return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// RLE-compress one chunk of scanline data into `dst`.
// Applies OpenEXR's byte reordering + delta predictor first (see
// ImfRleCompressor.cpp). `dst` must hold at least (src_size * 3) / 2
// bytes. If the result is not smaller than the input, the raw input is
// stored instead and compressedSize == src_size (Issue 40).
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data: even-indexed bytes to the first half of
  // tmpBuf, odd-indexed bytes to the second half.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor: store (biased) deltas between adjacent bytes so runs of
  // similar values become runs of equal bytes.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Decode one RLE-compressed chunk into `dst`, then undo the delta
// predictor and byte reordering (inverse of CompressRle). When src_size
// equals uncompressed_size the data was stored raw and is copied
// verbatim (Issue 40). Returns false when decoding fails or the decoded
// size does not match.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor: reconstruct original bytes from the stored deltas.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data: interleave the two halves back together.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel working state used during PIZ (de)compression; mirrors the
// ChannelData struct in OpenEXR's ImfPizCompressor.cpp.
// NOTE(review): field semantics (start/end cursors into the shorts buffer,
// nx/ny extents, ys sampling, size in shorts) are inferred from OpenEXR —
// confirm against the PIZ routines that fill this struct.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// Forward 14-bit Haar step: l receives the (floor) average of a and b,
// h receives their difference. Valid only for values below 1 << 14.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);

  const short avg = static_cast<short>((sa + sb) >> 1);
  const short diff = static_cast<short>(sa - sb);

  l = static_cast<unsigned short>(avg);
  h = static_cast<unsigned short>(diff);
}
// Inverse of wenc14: reconstructs a and b from average l and difference h.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const short ls = static_cast<short>(l);
  const short hs = static_cast<short>(h);

  const int d = hs;
  const int sum = ls + (d & 1) + (d >> 1);

  a = static_cast<unsigned short>(static_cast<short>(sum));
  b = static_cast<unsigned short>(static_cast<short>(sum - d));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the modulo-arithmetic (full 16-bit) wavelet basis below.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
// Forward Haar step with modulo arithmetic; handles the full 16-bit range.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const int ao = (a + A_OFFSET) & MOD_MASK;
  int avg = (ao + b) >> 1;
  const int diff = ao - b;

  if (diff < 0) {
    avg = (avg + M_OFFSET) & MOD_MASK;
  }

  l = static_cast<unsigned short>(avg);
  h = static_cast<unsigned short>(diff & MOD_MASK);
}
// Inverse of wenc16 (modulo-arithmetic Haar basis).
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int avg = l;
  const int diff = h;

  const int bv = (avg - (diff >> 1)) & MOD_MASK;
  const int av = (diff + bv - A_OFFSET) & MOD_MASK;

  b = static_cast<unsigned short>(bv);
  a = static_cast<unsigned short>(av);
}
//
// 2D Wavelet encoding:
//
// Forward 2D Haar wavelet transform, in place, level by level.
// Uses the 14-bit basis (wenc14) when all values fit in 14 bits (per mx),
// otherwise the modulo 16-bit basis (wenc16). Inverse is wav2Decode.
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
// Inverse 2D Haar wavelet transform, in place (inverse of wav2Encode).
// Must be called with the same nx/ny/ox/oy/mx used for encoding; levels
// are walked from coarsest to finest, the reverse of wav2Encode.
static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //
  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
// Huffman coder parameters: 16-bit symbols, 14-bit primary decode table.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
// One entry of the Huffman decoding table; fields are interpreted
// differently for short codes vs. long codes (see column comments).
struct HufDec {  // short code long code
  //-------------------------------
  unsigned int len : 8;   // code length 0
  unsigned int lit : 24;  // lit p size
  unsigned int *p;        // 0 lits
};
// A packed Huffman code stores its bit length in the low 6 bits and the
// canonical code value in the bits above them.
inline long long hufLength(long long code) {
  return code & 0x3f;
}
inline long long hufCode(long long code) {
  return code >> 6;
}
// Append the low nBits of `bits` to the bit accumulator (c holds pending
// bits, lc how many), flushing complete bytes to `out`.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;

  while (lc >= 8) {
    lc -= 8;
    *out++ = static_cast<char>(c >> lc);
  }
}
// Read nBits from the bit stream: refill the accumulator (c, lc) a byte at
// a time from `in` until it holds at least nBits, then extract them.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    const unsigned char next = static_cast<unsigned char>(*in++);
    c = (c << 8) | next;
    lc += 8;
  }

  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  // On entry hcode[i] holds the code length for symbol i; on exit it holds
  // length | (canonical code << 6), per the rules in the comment block
  // above (max code length 58 bits).
  long long n[59];

  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i) n[i] = 0;

  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;

  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //
  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }

  //
  // hcode[i] contains the length, l, of the
  // code for symbol i. Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);

    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Comparator for the frequency heap: ordering by *a > *b makes the
// std:: heap functions keep the *least* frequent symbol at the front.
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};
//
// Turn the frequency table frq[] into a Huffman encoding table:
// code lengths are computed by an implicit tree construction (see the
// long comment below), then canonicalized via hufCanonicalCodeTable().
// On return frq[] holds packed (code, length) pairs; *im / *iM bound the
// used symbol range, with iM being the run-length pseudo-symbol.
//
static void hufBuildEncTable(
    long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
    int *im, // o: min frq index
    int *iM) // o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded. (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  // to non-zero entries in frq:
  //
  // frq[im] != 0, and frq[i] == 0 for all i < im
  // frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  // entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  // for all array entries.
  //
  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);
  *im = 0;
  // NOTE(review): assumes at least one non-zero frequency (callers pass
  // nRaw > 0 histograms); an all-zero frq would run past the array.
  while (!frq[*im]) (*im)++;
  int nf = 0;
  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;
    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }
  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly. Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;
  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i. Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  // Make a heap that contains all symbols with a non-zero frequency,
  // with the least frequent symbol on top.
  //
  // Repeat until only one symbol is left on the heap:
  //
  // Take the two least frequent symbols off the top of the heap.
  // Create a new node that has first two nodes as children, and
  // whose frequency is the sum of the frequencies of the first
  // two nodes. Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree. For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly. When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    // (Pointer difference against the frq base recovers the symbol index.)
    //
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;
    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //
    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }
    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) break;
    }
  }
  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs. Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
// Zero-run markers used by hufPackEncTable()/hufUnpackEncTable() (see the
// table above): codes 59..62 encode short runs of 2..5 zeroes; code 63
// prefixes an 8-bit long-run length biased by SHORTEST_LONG_RUN.
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
//
// Pack the code lengths of hcode[im..iM] into *pcode using the zero-run
// encoding documented above (6 bits per length, runs of unused symbols
// compressed). Advances *pcode past the packed table.
//
static void hufPackEncTable(
    const long long *hcode, // i : encoding table [HUF_ENCSIZE]
    int im, // i : min hcode index
    int iM, // i : max hcode index
    char **pcode) // o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);
    if (l == 0) {
      // Coalesce a run of zero-length (unused) symbols.
      int zerun = 1;
      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }
      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          // 6-bit long-run marker followed by 8-bit biased run length.
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          // Short runs of 2..5 zeroes map to codes 59..62.
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }
    outputBits(6, l, c, lc, p);
  }
  // Flush the final partial byte, padded with zero bits.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
  *pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
//
// Unpack an encoding table packed by hufPackEncTable(): reads at most ni
// bytes from *pcode, reconstructs the code lengths of hcode[im..iM], then
// converts them to canonical (code, length) pairs in place.
// Returns false if the packed data is truncated or a zero run would
// overrun iM.
//
static bool hufUnpackEncTable(
    const char **pcode, // io: ptr to packed table (updated)
    int ni, // i : input size (in bytes)
    int im, // i : min hcode index
    int iM, // i : max hcode index
    long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  const char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    if (p - *pcode >= ni) {
      return false;
    }
    long long l = hcode[im] = getBits(6, c, lc, p); // code length
    if (l == (long long)LONG_ZEROCODE_RUN) {
      // Long zero run: next 8 bits hold (run length - SHORTEST_LONG_RUN).
      if (p - *pcode > ni) {
        return false;
      }
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run: codes 59..62 encode 2..5 zeroes.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    }
  }
  *pcode = const_cast<char *>(p);
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
//
// Reset every entry of a freshly allocated decoding table to the empty
// state (no code length, no symbol, no secondary list).
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    HufDec &entry = hdecod[i];
    entry.len = 0;
    entry.lit = 0;
    entry.p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
                             int im, // i : min index in hcode
                             int iM, // i : max index in hcode
                             HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  // Returns false on any malformed table entry (over-long code value,
  // or short/long code collisions), leaving hdecod partially filled;
  // callers must still run hufFreeDecTable() to release secondary lists.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);
    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      // invalidTableEntry();
      return false;
    }
    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      // The table slot is addressed by the top HUF_DECBITS bits of the
      // code; the symbol index is appended to that slot's list.
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        // invalidTableEntry();
        return false;
      }
      // Grow the secondary list by one (lit doubles as the list size).
      pl->lit++;
      if (pl->p) {
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];
        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }
      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //
      // Every slot whose top l bits equal the code decodes to this
      // symbol, so fill all 2^(HUF_DECBITS - l) slots below it.
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          // invalidTableEntry();
          return false;
        }
        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
//
// Release the secondary (long-code) symbol lists owned by a decoding
// table and null the pointers so the table can be reused safely.
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    delete[] hdecod[i].p;  // delete[] on NULL is a no-op
    hdecod[i].p = NULL;
  }
}
//
// ENCODING
//
// Emit one Huffman code: unpack the packed (code, length) pair and write
// that many bits to the output stream.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  const long long nBits = hufLength(code);
  const long long value = hufCode(code);
  outputBits(nBits, value, c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of (runCount + 1) instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  // Note: runCount is the number of repeats *after* the first instance
  // (hufEncode counts duplicates in `cs`), which is why the explicit
  // branch uses `>= 0` — it emits runCount + 1 codes.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
    (const long long *hcode, // i : encoding table
     const unsigned short *in, // i : uncompressed input buffer
     const int ni, // i : input buffer size (number of 16-bit values)
     int rlc, // i : run-length code (the pseudo-symbol index)
     char *out) // o: compressed output buffer
{
  char *outStart = out;
  long long c = 0; // bits not yet written to out
  int lc = 0; // number of valid bits in c (LSB)
  int s = in[0];
  int cs = 0; // number of repeats of s seen after its first instance
  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }
    s = in[i];
  }
  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  // Flush the final partial byte (high bits first, zero padded); `out`
  // is intentionally not advanced — the bit count below accounts for lc.
  if (lc) *out = (c << (8 - lc)) & 0xff;
  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// Pull the next input byte into the bit accumulator `c`; `lc` counts the
// valid low bits of `c`. (Kept as a macro so it is shared by getCode()
// and hufDecode() without call overhead — see the comment above.)
#define getChar(c, lc, in) \
  { \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8; \
  }
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
// Emit one decoded symbol `po` to the output, or — when `po` is the
// run-length code `rlc` — read an 8-bit repeat count from the input and
// replicate the previously emitted symbol that many times.
// Returns false on input underrun or output overrun/underrun.
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      /* TinyEXR issue 160. in + 1 -> in */
      if (in >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }
    // Consume the 8-bit repeat count.
    lc -= 8;
    unsigned char cs = (c >> lc);
    if (out + cs > oe) return false;
    // Bounds check for safety: a run cannot be the first output symbol.
    // Issue 100.
    if ((out - 1) < ob) return false;
    unsigned short s = out[-1];
    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
                      const HufDec *hdecod, // i : decoding table
                      const char *in, // i : compressed input buffer
                      int ni, // i : input size (in bits)
                      int rlc, // i : run-length code
                      int no, // i : expected output size (16-bit values)
                      unsigned short *out) // o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out; // begin
  unsigned short *oe = out + no; // end
  const char *ie = in + (ni + 7) / 8; // input byte size
  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);
    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code
        //
        // Search long code
        //
        // Linear search of the slot's candidate list, matching each
        // candidate's full code against the accumulator bits.
        //
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);
          while (lc < l && in < ie) // get more bits
            getChar(c, lc, in);
          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }
        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }
  //
  // Get remaining (short) codes
  //
  // The stream is ni bits long; the final byte carried (8 - ni) & 7
  // padding bits, which are discarded here before draining the
  // accumulator.
  //
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;
  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }
  // The output buffer must be filled exactly.
  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();
  return true;
}
// Histogram the input symbols: freq[s] = number of occurrences of s in
// data[0..n). freq must have at least HUF_ENCSIZE entries.
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  std::fill_n(freq.begin(), HUF_ENCSIZE, 0LL);
  for (int i = 0; i < n; ++i) {
    freq[data[i]] += 1;
  }
}
// Serialize a 32-bit value into buf in little-endian byte order.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;
  for (int k = 0; k < 4; ++k) {
    b[k] = (unsigned char)(i >> (8 * k));
  }
}
// Deserialize a little-endian 32-bit value from buf.
//
// Each byte is widened to unsigned int before shifting: the previous
// (b[3] << 24) promoted the byte to signed int, and shifting a value
// >= 0x80 into the sign bit is undefined behavior (pre-C++20).
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
//
// Huffman-encode nRaw 16-bit values into `compressed`; returns the total
// number of bytes written (0 for empty input). Output layout: a 20-byte
// header (im, iM, tableLength, nBits, reserved — 4 bytes each), the
// packed code-length table, then the encoded bit stream.
//
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;
  std::vector<long long> freq(HUF_ENCSIZE);
  countFrequencies(freq, raw, nRaw);
  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;
  // iM is the run-length pseudo-symbol added by hufBuildEncTable().
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0); // room for future extensions
  return dataStart + data_length - compressed;
}
//
// Huffman-decode nCompressed bytes into *raw, which the caller pre-sizes
// to the expected number of 16-bit output values.
//
// Input layout (written by hufCompress): 20-byte header holding im, iM,
// tableLength, nBits and a reserved word, followed by the packed
// code-length table and the encoded bit stream.
//
// Returns false on empty or malformed input. Failures reported by
// hufUnpackEncTable()/hufBuildDecTable()/hufDecode() are now propagated;
// previously they were ignored, so corrupt input could produce garbage
// output that was reported as success.
//
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // Nothing to decode; a non-empty expected output can never be
    // satisfied by empty input. (Both branches of the original code
    // returned false here as well.)
    return false;
  }
  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);
  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
  const char *ptr = compressed + 20;
  std::vector<long long> freq(HUF_ENCSIZE);
  std::vector<HufDec> hdec(HUF_DECSIZE);
  hufClearDecTable(&hdec.at(0));
  if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                         &freq.at(0))) {
    return false;
  }
  // The bit stream cannot be longer than the bytes that remain.
  if (nBits > 8 * (nCompressed - (ptr - compressed))) {
    return false;
  }
  bool ok = hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
  if (ok) {
    // iM is the run-length pseudo-symbol added by hufBuildEncTable().
    ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                   static_cast<int>(raw->size()), raw->data());
  }
  // Always release the secondary (long-code) lists, even on failure.
  hufFreeDecTable(&hdec.at(0));
  return ok;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16); // number of distinct 16-bit values
const int BITMAP_SIZE = (USHORT_RANGE >> 3); // one presence bit per value
// Build a presence bitmap of the 16-bit values occurring in data[] and
// report the first/last non-empty bitmap bytes. Value 0 is masked out of
// the bitmap: decoders assume zero is always present.
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;

  // Set one bit per distinct value present in the data.
  for (int i = 0; i < nData; ++i) {
    const unsigned short v = data[i];
    bitmap[v >> 3] |= (unsigned char)(1 << (v & 7));
  }

  // zero is not explicitly stored in the bitmap; we assume that the
  // data always contain zeroes
  bitmap[0] &= ~1;

  // Locate the first and last non-empty bitmap bytes.
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i] != 0) {
      if (i < minNonZero) minNonZero = i;
      if (i > maxNonZero) maxNonZero = i;
    }
  }
}
// Build a value -> dense-index lookup table from the presence bitmap
// (value 0 is always treated as present). Returns the largest index
// assigned, i.e. the number of present values minus one.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int next = 0;
  for (int i = 0; i < USHORT_RANGE; ++i) {
    const bool present = (i == 0) || ((bitmap[i >> 3] & (1 << (i & 7))) != 0);
    lut[i] = present ? (unsigned short)next++ : (unsigned short)0;
  }
  return next - 1;
}
// Build the inverse table: dense index -> original value, for every value
// marked present in the bitmap (0 is always present). The unused tail of
// the table is zero-filled. Returns the maximum valid index.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;
  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) {
      lut[k] = i;
      ++k;
    }
  }
  const int maxIndex = k - 1;
  // Clear the unused tail.
  while (k < USHORT_RANGE) {
    lut[k] = 0;
    ++k;
  }
  return maxIndex;
}
// Remap every element of data[] through the lookup table, in place.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int idx = 0; idx < nData; ++idx) {
    data[idx] = lut[data[idx]];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
//
// PIZ-compress `inSize` bytes of scanline-interleaved pixel data into
// outPtr. Pipeline: presence bitmap -> forward LUT (range compaction) ->
// per-channel 2D wavelet encode -> Huffman encode. Output: minNonZero,
// maxNonZero, the trimmed bitmap, a 4-byte Huffman length, then the
// Huffman data. Falls back to a raw copy when compression would expand
// the data (Issue 40). Little-endian only.
//
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !TINYEXR_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Carve tmpBuffer into one contiguous plane per channel.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];
    cd.start = tmpBufferEnd;
    cd.end = cd.start;
    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;
    size_t pixelSize = sizeof(int); // UINT and FLOAT
    if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    // size = channel sample width in 16-bit units (1 = HALF, 2 = others).
    cd.size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }
  // De-interleave the scanline data into the per-channel planes.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      // continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  // Compact the value range: map each occurring value to a dense index.
  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);
  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  if (minNonZero <= maxNonZero) {
    // Only the non-empty window of the bitmap is stored.
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }
  //
  // Apply wavelet encoding
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);
  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));
  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
//
// Inverse of CompressPiz: Huffman decode -> per-channel 2D wavelet decode
// -> reverse LUT (range expansion) -> re-interleave scanlines into outPtr.
// `tmpBufSizeInBytes` is the expected uncompressed size; when it equals
// inLen the payload is an uncompressed fallback copy (Issue 40).
// Little-endian only.
//
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSizeInBytes, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSizeInBytes) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !TINYEXR_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  memset(bitmap.data(), 0, BITMAP_SIZE);
  const unsigned char *ptr = inPtr;
  // Header: minNonZero, maxNonZero (2 bytes each), then the trimmed bitmap.
  // cpy2 avoids unaligned 16-bit loads.
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;
  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }
  if (minNonZero <= maxNonZero) {
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }
  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
  //
  // Huffman decoding
  //
  int length;
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);
  if (size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }
  std::vector<unsigned short> tmpBuffer(tmpBufSizeInBytes / sizeof(unsigned short));
  // Bug fix: propagate Huffman decode failures. The return value was
  // previously ignored, so corrupt input produced garbage pixel data
  // that was reported as success.
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer)) {
    return false;
  }
  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Carve tmpBuffer into one contiguous plane per channel.
  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];
    size_t pixelSize = sizeof(int); // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Expand the pixel data to their original range
  //
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSizeInBytes / sizeof(unsigned short)));
  // Re-interleave the per-channel planes back into scanline order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      // continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
// Parameters controlling ZFP (de)compression. Which field is honored
// depends on `type`: rate for RATE mode, precision for PRECISION mode,
// tolerance for ACCURACY mode. Defaults to fixed-rate 2.0.
struct ZFPCompressionParam {
  double rate;
  unsigned int precision;
  unsigned int __pad0;
  double tolerance;
  int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
  unsigned int __pad1;
  ZFPCompressionParam() {
    type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
    rate = 2.0;
    precision = 0;
    tolerance = 0.0;
  }
};
//
// Extract ZFP compression parameters from the EXR attribute list:
// first `zfpCompressionType` (1-byte uchar), then the mode-specific
// value attribute (rate/precision/tolerance). Returns false — appending
// a message to *err when provided — if any required attribute is missing
// or malformed.
//
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
                                    const EXRAttribute *attributes,
                                    int num_attributes, std::string *err) {
  bool foundType = false;
  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
      if (attributes[i].size == 1) {
        param->type = static_cast<int>(attributes[i].value[0]);
        foundType = true;
        break;
      } else {
        if (err) {
          (*err) +=
              "zfpCompressionType attribute must be uchar(1 byte) type.\n";
        }
        return false;
      }
    }
  }
  if (!foundType) {
    if (err) {
      (*err) += "`zfpCompressionType` attribute not found.\n";
    }
    return false;
  }
  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        // memcpy avoids a potentially misaligned double load through
        // reinterpret_cast.
        memcpy(&param->rate, attributes[i].value, sizeof(double));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionRate` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Bug fix: the precision value was previously stored into
        // param->rate, leaving param->precision at its default (0).
        memcpy(&param->precision, attributes[i].value, sizeof(unsigned int));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionPrecision` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        memcpy(&param->tolerance, attributes[i].value, sizeof(double));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionTolerance` attribute not found.\n";
    }
  } else {
    if (err) {
      (*err) += "Unknown value specified for `zfpCompressionType`.\n";
    }
  }
  return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size =
size_t(dst_width) * size_t(dst_num_lines) * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
                        unsigned int *outSize, const float *inPtr, int width,
                        int num_lines, int num_channels,
                        const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;
  // ZFP operates on 4x4 blocks; dimensions must be multiples of 4.
  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);
  if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
    return false;
  }
  // create input array.
  // Channels are stacked vertically into one 2D field; the field is only
  // used to size the output buffer below (blocks are encoded manually).
  field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
                       zfp_type_float, static_cast<unsigned int>(width),
                       static_cast<unsigned int>(num_lines * num_channels));
  zfp = zfp_stream_open(NULL);
  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance);
  } else {
    assert(0);
  }
  size_t buf_size = zfp_stream_maximum_size(zfp, field);
  outBuf->resize(buf_size);
  bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_field_free(field);
  size_t image_size = size_t(width) * size_t(num_lines);
  for (size_t c = 0; c < size_t(num_channels); c++) {
    // compress 4x4 pixel block.
    for (size_t y = 0; y < size_t(num_lines); y += 4) {
      for (size_t x = 0; x < size_t(width); x += 4) {
        float fblock[16];
        // Gather the 4x4 block from the channel plane.
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 4; i++) {
            fblock[j * 4 + i] =
                inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
          }
        }
        zfp_encode_block_float_2(zfp, fblock);
      }
    }
  }
  zfp_stream_flush(zfp);
  (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
  zfp_stream_close(zfp);
  return true;
}
#endif
//
// -----------------------------------------------------------------
//
// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
// Decodes one block (a scanline chunk or one tile) of pixel data into the
// per-channel output images.
//
// out_images            : array of per-channel destination buffers.
// requested_pixel_types : per-channel output pixel type (HALF/FLOAT/UINT).
// data_ptr/data_len     : compressed (or raw) input bytes for this block.
// compression_type      : TINYEXR_COMPRESSIONTYPE_*.
// line_order            : 0 = increasing Y; otherwise scanlines are stored
//                         bottom-up and the destination row is mirrored.
// width                 : width (in pixels) of this block.
// height                : full image height (used for the line_order flip).
// x_stride              : row stride (in pixels) of the destination images.
// y, line_no, num_lines : scanline placement of this block.
// pixel_data_size       : bytes per pixel summed over all channels.
//
// Returns false on corrupt or insufficient input data.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }

    // Allocate original data size.
    // BUGFIX: cast each factor to size_t *before* multiplying; the original
    // computed `width * num_lines` in `int`, which can overflow for large
    // images.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    size_t tmpBufLen = outBuf.size();

    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);

    if (!ret) {
      return false;
    }

    // For PIZ_COMPRESSION the decompressed buffer is laid out as:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            FP16 hf;

            // use `cpy` to avoid unaligned memory access when compiler's
            // optimization is on.
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // byte-wise copy to avoid unaligned access (see HALF case).
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // BUGFIX: the PIZ-decompressed buffer is laid out with `width`
          // pixels per scanline (DecompressPiz above was called with `width`),
          // not `x_stride`. The original indexed with `x_stride`, reading the
          // wrong location (and potentially out of bounds via outBuf.at) when
          // x_stride != width, e.g. for cropped edge tiles. All other
          // branches of this function already use `width` here.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    // BUGFIX: the assert message was inverted — this branch is taken when PIZ
    // support is NOT compiled in.
    assert(0 && "PIZ is not enabled in this build");
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    // BUGFIX: the original used assert(dstLen > 0), which is a no-op in
    // release builds; fail explicitly like the RLE path does.
    if (dstLen == 0) {
      return false;
    }
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For ZIP_COMPRESSION the decompressed buffer is laid out as:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            // byte-wise copy to avoid unaligned access.
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }

    if (!tinyexr::DecompressRle(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // RLE-decompressed data uses the same channel-major per-scanline layout
    // as ZIP (see above).
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    std::string e;
    if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
                                          int(num_attributes), &e)) {
      // This code path should not be reachable.
      assert(0);
      return false;
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }
    tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
                           num_lines, num_channels, data_ptr,
                           static_cast<unsigned long>(data_len),
                           zfp_compression_param);

    // ZFP-decompressed data uses the same channel-major per-scanline layout
    // as ZIP (see above); only FLOAT channels are supported.
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // Uncompressed: read channel data directly out of `data_ptr`.
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));

          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // address may not be aligned. use byte-wise copy for safety.#76
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              tinyexr::FP32 f32 = half_to_float(hf);

              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          for (int u = 0; u < width; u++) {
            if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
                (data_ptr + data_len)) {
              // Corrupted data?
              return false;
            }

            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}
// Decodes one tile's worth of pixel data. `data_width`/`data_height` are the
// dimensions of the current (sub)level; `*width`/`*height` receive the actual
// tile extent (edge tiles may be cropped). Returns false when the tile origin
// lies outside the level, or when the underlying block decode fails.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // Reject tiles whose origin falls outside the level bounds.
  if ((tile_size_x * tile_offset_x > data_width) ||
      (tile_size_y * tile_offset_y > data_height)) {
    return false;
  }

  // Clamp the tile extent against the level edge: an interior tile is a full
  // tile_size_x x tile_size_y block, an edge tile keeps only the remainder.
  const int remain_x = data_width - (tile_offset_x * tile_size_x);
  const int remain_y = data_height - (tile_offset_y * tile_size_y);
  (*width) = (tile_size_x < remain_x) ? tile_size_x : remain_x;
  (*height) = (tile_size_y < remain_y) ? tile_size_y : remain_y;

  // A tile is decoded exactly like a scanline block whose row stride is the
  // nominal tile width.
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}
// Computes, for each channel, its byte offset within one pixel-row group of
// decoded data, plus the total bytes per pixel across all channels.
// Returns false if any channel has an unrecognized pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));

  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    // Record where this channel's samples start before accumulating its size.
    (*channel_offset_list)[c] = (*channel_offset);

    size_t component_size = 0;
    switch (channels[c].pixel_type) {
      case TINYEXR_PIXELTYPE_HALF:
        component_size = sizeof(unsigned short);
        break;
      case TINYEXR_PIXELTYPE_FLOAT:
        component_size = sizeof(float);
        break;
      case TINYEXR_PIXELTYPE_UINT:
        component_size = sizeof(unsigned int);
        break;
      default:
        // Unknown pixel type.
        return false;
    }

    (*pixel_data_size) += static_cast<int>(component_size);
    (*channel_offset) += component_size;
  }

  return true;
}
// Allocates one destination buffer per channel, sized data_width x
// data_height samples. HALF channels are allocated at the *requested* output
// type (HALF or FLOAT); FLOAT and UINT channels at their native size.
// Ownership of the returned array and each buffer transfers to the caller,
// who must free() every entry and then the array itself.
// Returns NULL if the pointer array itself could not be allocated; individual
// entries may be NULL on per-channel allocation failure or unknown type.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
  // BUGFIX: check the pointer-array allocation before writing into it.
  if (images == NULL) {
    return NULL;
  }

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
        // BUGFIX: assert() is a no-op under NDEBUG, and the original left
        // images[c] uninitialized here — a later free() would act on garbage.
        images[c] = NULL;
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
      images[c] = NULL;  // keep the slot defined (see above)
    }
  }

  return images;
}
#ifdef _WIN32
// Converts a UTF-8 encoded std::string to a UTF-16 std::wstring via the Win32
// MultiByteToWideChar API (Windows only; used to open paths with wide-char
// file APIs).
static inline std::wstring UTF8ToWchar(const std::string &str) {
  // First call with a NULL output buffer returns the required length in
  // wide characters.
  int wstr_size =
      MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
  std::wstring wstr(wstr_size, 0);
  // Second call performs the actual conversion into the preallocated buffer.
  MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0],
                      (int)wstr.size());
  return wstr;
}
#endif
// Parses one EXR (part) header starting at `buf` into `info`.
//
// empty_header : (optional out) set to true when a multipart header list
//                terminator (a single '\0') is found instead of a header.
// version      : parsed EXR version field; controls multipart/tiled handling.
// err          : (optional out) human-readable error messages are appended.
//
// Returns TINYEXR_SUCCESS, or a TINYEXR_ERROR_* code on malformed input.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;
  bool has_name = false;
  bool has_type = false;

  // Reset `info` to known defaults before parsing.
  info->name.clear();
  info->type.clear();

  info->data_window.min_x = 0;
  info->data_window.min_y = 0;
  info->data_window.max_x = 0;
  info->data_window.max_y = 0;
  info->line_order = 0;  // @fixme
  info->display_window.min_x = 0;
  info->display_window.min_y = 0;
  info->display_window.max_x = 0;
  info->display_window.max_y = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;

  info->tiled = 0;
  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;

  info->attributes.clear();

  // Read attributes (bounded to guard against malicious headers).
  size_t orig_size = size;
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // A lone NUL terminates the attribute list.
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    // For a multipart file, the version field 9th bit is 0.
    if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      if (data.size() != 9) {
        if (err) {
          (*err) += "(ParseEXRHeader) Invalid attribute data size. Attribute data size must be 9.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);

      if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
          y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
        if (err) {
          (*err) = "Tile sizes were invalid.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
      info->tiled = 1;
    } else if (attr_name.compare("compression") == 0) {
      // BUGFIX: guard against an empty attribute payload before reading
      // data[0] (out-of-bounds read on a malformed file).
      if (data.size() < 1) {
        if (err) {
          (*err) += "(ParseEXRHeader) Invalid attribute data size for `compression`.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;

    } else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->data_window.min_x);
        tinyexr::swap4(&info->data_window.min_y);
        tinyexr::swap4(&info->data_window.max_x);
        tinyexr::swap4(&info->data_window.max_y);
        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->display_window.min_x);
        tinyexr::swap4(&info->display_window.min_y);
        tinyexr::swap4(&info->display_window.max_x);
        tinyexr::swap4(&info->display_window.max_y);
        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->pixel_aspect_ratio);
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(&info->screen_window_center[0]);
        tinyexr::swap4(&info->screen_window_center[1]);
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->screen_window_width);
        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(&info->chunk_count);
      }
    } else if (attr_name.compare("name") == 0) {
      if (!data.empty() && data[0]) {
        // Append a NUL so strlen() is safe even if the payload is not
        // terminated.
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->name.resize(len);
        info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_name = true;
      }
    } else if (attr_name.compare("type") == 0) {
      if (!data.empty() && data[0]) {
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->type.resize(len);
        info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_type = true;
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        // BUGFIX: a zero-length attribute payload made `data.at(0)` throw
        // std::out_of_range. Store a NULL value for empty payloads instead.
        if (data.size() > 0) {
          attrib.value = static_cast<unsigned char *>(malloc(data.size()));
          memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                 data.size());
        } else {
          attrib.value = NULL;
        }
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    if (version->multipart || version->non_image) {
      // Multipart and deep files additionally require part name and type.
      if (!has_name) {
        ss_err << "\"name\" attribute not found in the header."
               << std::endl;
      }
      if (!has_type) {
        ss_err << "\"type\" attribute not found in the header."
               << std::endl;
      }
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }

      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  info->header_len = static_cast<unsigned int>(orig_size - size);

  return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
//
// Copies geometry, tiling, channel and custom-attribute data from `info`
// into `exr_header`. Channel and attribute arrays are malloc'ed (released
// later by FreeEXRHeader); attribute values are shared by pointer, not
// duplicated.
//
// Returns false (with a message appended to `err`) when the `type` attribute
// contradicts the tiled flag; an unknown `type` only appends to `warn`.
static bool ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info, std::string *warn, std::string *err) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window.min_x = info.display_window.min_x;
  exr_header->display_window.min_y = info.display_window.min_y;
  exr_header->display_window.max_x = info.display_window.max_x;
  exr_header->display_window.max_y = info.display_window.max_y;
  exr_header->data_window.min_x = info.data_window.min_x;
  exr_header->data_window.min_y = info.data_window.min_y;
  exr_header->data_window.max_x = info.data_window.max_x;
  exr_header->data_window.max_y = info.data_window.max_y;
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tiled = info.tiled;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;
  EXRSetNameAttr(exr_header, info.name.c_str());

  // Cross-check the `type` attribute against the tiled flag.
  bool valid = true;
  if (!info.type.empty()) {
    if (info.type == "scanlineimage") {
      if (exr_header->tiled) {
        if (err) {
          (*err) += "(ConvertHeader) tiled bit must be off for `scanlineimage` type.\n";
        }
        valid = false;
      }
    } else if (info.type == "tiledimage") {
      if (!exr_header->tiled) {
        if (err) {
          (*err) += "(ConvertHeader) tiled bit must be on for `tiledimage` type.\n";
        }
        valid = false;
      }
    } else if (info.type == "deeptile") {
      exr_header->non_image = 1;
      if (!exr_header->tiled) {
        if (err) {
          (*err) += "(ConvertHeader) tiled bit must be on for `deeptile` type.\n";
        }
        valid = false;
      }
    } else if (info.type == "deepscanline") {
      exr_header->non_image = 1;
      if (exr_header->tiled) {
        if (err) {
          (*err) += "(ConvertHeader) tiled bit must be off for `deepscanline` type.\n";
        }
        valid = false;
      }
    } else {
      // Unknown types are not fatal; report via `warn` only.
      if (warn) {
        std::stringstream ss;
        ss << "(ConvertHeader) Unsupported or unknown info.type: " << info.type << "\n";
        (*warn) += ss.str();
      }
    }
  }

  exr_header->num_channels = static_cast<int>(info.channels.size());

  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }

    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));

    // Iterate over the (possibly clamped) attribute count, NOT
    // info.attributes.size(): copying info.attributes.size() entries would
    // write past the allocation when the count was clamped above.
    for (size_t i = 0; i < size_t(exr_header->num_custom_attributes); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }
  } else {
    exr_header->custom_attributes = NULL;
  }

  exr_header->header_len = info.header_len;

  // Propagate the validation result. Previously `valid` was computed but
  // `true` was returned unconditionally, silently ignoring type mismatches.
  return valid;
}
// Chunk offset tables for every resolution level of an image.
// offsets[level][tile_y][tile_x] holds the absolute byte offset of a chunk;
// single-resolution images use one level containing one row of offsets.
struct OffsetData {
  OffsetData() : num_x_levels(0), num_y_levels(0) {}
  std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
  int num_x_levels;  // level counts implied by the header's tile level mode
  int num_y_levels;
};
// Maps a (level x, level y) pair to the flat index of that level's offset
// table, given the header's tile level mode.
//
// `static` added for consistency with every other file-local helper here;
// external linkage in a header-style file risks duplicate-symbol errors when
// the implementation is included in more than one translation unit.
static int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
  switch (tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 0;

    case TINYEXR_TILE_MIPMAP_LEVELS:
      return lx;

    case TINYEXR_TILE_RIPMAP_LEVELS:
      // Ripmap levels form a 2-D grid flattened row-major.
      return lx + ly * num_x_levels;

    default:
      assert(false);
  }
  return 0;
}
// Size of one axis of the image at mip/rip `level`, derived from the
// top-level size. The rounding mode decides whether a partial remainder
// bumps the size up by one. Never returns less than 1.
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
  assert(level >= 0);

  const int scale = (int)(1u << (unsigned)level);
  int level_size = toplevel_size / scale;

  // ROUND_UP: a non-zero division remainder adds one pixel at this level.
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP &&
      level_size * scale < toplevel_size) {
    level_size += 1;
  }

  return std::max(level_size, 1);
}
// Decodes every tile of one resolution level of a tiled EXR image into
// exr_image->tiles. The level to decode is taken from exr_image->level_x/y;
// chunk locations come from offset_data. Tiles are decoded in parallel when
// C++11 threads (or OpenMP) are enabled; per-tile failures are OR-ed into a
// shared flag and reported collectively after the loop.
// Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA.
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
                            const OffsetData& offset_data,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const unsigned char* head, const size_t size,
                            std::string* err) {
  int num_channels = exr_header->num_channels;

  int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
  int num_y_tiles = (int)offset_data.offsets[level_index].size();
  assert(num_y_tiles);
  int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
  assert(num_x_tiles);
  int num_tiles = num_x_tiles * num_y_tiles;

  int err_code = TINYEXR_SUCCESS;

  // Bit flags OR-ed into error_flag by (possibly concurrent) tile decoders.
  enum {
    EF_SUCCESS = 0,
    EF_INVALID_DATA = 1,
    EF_INSUFFICIENT_DATA = 2,
    EF_FAILED_TO_DECODE = 4
  };
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
  unsigned error_flag(EF_SUCCESS);
#endif

  // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
  // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
  if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
      exr_image->level_x == 0 && exr_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to decode tile data.\n";
    }
    err_code = TINYEXR_ERROR_INVALID_DATA;
  }
#endif
  // NOTE(review): calloc's conventional argument order is (count, size);
  // the swapped order here still allocates the same zeroed byte count.
  exr_image->tiles = static_cast<EXRTile*>(
    calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Worker threads pull tile indices from a shared atomic counter.
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);

  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }

  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]()
      {
        int tile_idx = 0;
        while ((tile_idx = tile_count++) < num_tiles) {

#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
    // Allocate memory for each tile.
    exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
      num_channels, exr_header->channels,
      exr_header->requested_pixel_types, exr_header->tile_size_x,
      exr_header->tile_size_y);

    int x_tile = tile_idx % num_x_tiles;
    int y_tile = tile_idx / num_x_tiles;
    // Tile chunk layout:
    // 16 byte: tile coordinates
    // 4 byte : data size
    // ~      : data(uncompressed or compressed)
    tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
    if (offset + sizeof(int) * 5 > size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }

    size_t data_size =
      size_t(size - (offset + sizeof(int) * 5));
    const unsigned char* data_ptr =
      reinterpret_cast<const unsigned char*>(head + offset);

    int tile_coordinates[4];
    memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
    tinyexr::swap4(&tile_coordinates[0]);
    tinyexr::swap4(&tile_coordinates[1]);
    tinyexr::swap4(&tile_coordinates[2]);
    tinyexr::swap4(&tile_coordinates[3]);

    // The chunk's stored level must match the level being decoded.
    if (tile_coordinates[2] != exr_image->level_x) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }
    if (tile_coordinates[3] != exr_image->level_y) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }

    int data_len;
    memcpy(&data_len, data_ptr + 16,
           sizeof(int));  // 16 = sizeof(tile_coordinates)
    tinyexr::swap4(&data_len);
    if (data_len < 2 || size_t(data_len) > data_size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }

    // Move to data addr: 20 = 16 + 4;
    data_ptr += 20;
    bool ret = tinyexr::DecodeTiledPixelData(
      exr_image->tiles[tile_idx].images,
      &(exr_image->tiles[tile_idx].width),
      &(exr_image->tiles[tile_idx].height),
      exr_header->requested_pixel_types, data_ptr,
      static_cast<size_t>(data_len), exr_header->compression_type,
      exr_header->line_order,
      exr_image->width, exr_image->height,
      tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
      exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
      static_cast<size_t>(exr_header->num_custom_attributes),
      exr_header->custom_attributes,
      static_cast<size_t>(exr_header->num_channels),
      exr_header->channels, channel_offset_list);
    if (!ret) {
      // Failed to decode tile data.
      error_flag |= EF_FAILED_TO_DECODE;
    }
    exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
    exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
    exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
    exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
  }  // num_thread loop

  for (auto& t : workers) {
    t.join();
  }

#else
  }  // parallel for
#endif

  // Even in the event of an error, the reserved memory may be freed.
  exr_image->num_channels = num_channels;
  exr_image->num_tiles = static_cast<int>(num_tiles);

  if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
  if (err) {
    if (error_flag & EF_INSUFFICIENT_DATA) {
      (*err) += "Insufficient data length.\n";
    }
    if (error_flag & EF_FAILED_TO_DECODE) {
      (*err) += "Failed to decode tile data.\n";
    }
  }
  return err_code;
}
// Decodes all pixel chunks (scanline blocks or tiles) listed in offset_data
// into exr_image. For tiled images it walks the level chain, delegating each
// level to DecodeTiledLevel(); for scanline images it decodes the blocks
// (in parallel when C++11 threads / OpenMP are enabled). On success it also
// overwrites exr_header->pixel_types with the requested pixel types.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const OffsetData& offset_data,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  // Scanlines per chunk depends on the compression scheme
  // (1 for NONE/RLE/ZIPS, 16 for ZIP/ZFP, 32 for PIZ).
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;

#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param,
                                 exr_header->custom_attributes,
                                 int(exr_header->num_custom_attributes), err)) {
      return TINYEXR_ERROR_INVALID_HEADER;
    }
#endif
  }

  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_y < exr_header->data_window.min_y) {
    if (err) {
      (*err) += "Invalid data window.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  int data_width =
    exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  int data_height =
    exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height. header invalid?
  {
    if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
      if (err) {
        std::stringstream ss;
        ss << "data_with or data_height too large. data_width: " << data_width
           << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tiled) {
      if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
        if (err) {
          std::stringstream ss;
          ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
             << ", "
             << "tile height = " << exr_header->tile_size_y << std::endl;
          (*err) += ss.str();
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
  size_t num_blocks = offsets.size();

  // Byte offset of each channel inside a decoded pixel, plus total pixel size.
  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif

  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
      // ONE_LEVEL / MIPMAP: both axes shrink together, so a single level
      // counter walks the chain of EXRImage levels.
      EXRImage* level_image = NULL;
      for (int level = 0; level < offset_data.num_x_levels; ++level) {
        if (!level_image) {
          level_image = exr_image;
        } else {
          level_image->next_level = new EXRImage;
          InitEXRImage(level_image->next_level);
          level_image = level_image->next_level;
        }
        level_image->width =
          LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
        level_image->height =
          LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
        level_image->level_x = level;
        level_image->level_y = level;

        int ret = DecodeTiledLevel(level_image, exr_header,
                                   offset_data,
                                   channel_offset_list,
                                   pixel_data_size,
                                   head, size,
                                   err);
        if (ret != TINYEXR_SUCCESS) return ret;
      }
    } else {
      // RIPMAP: every (level_x, level_y) combination gets its own image.
      EXRImage* level_image = NULL;
      for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
        for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
          if (!level_image) {
            level_image = exr_image;
          } else {
            level_image->next_level = new EXRImage;
            InitEXRImage(level_image->next_level);
            level_image = level_image->next_level;
          }
          level_image->width =
            LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
          level_image->height =
            LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
          level_image->level_x = level_x;
          level_image->level_y = level_y;

          int ret = DecodeTiledLevel(level_image, exr_header,
                                     offset_data,
                                     channel_offset_list,
                                     pixel_data_size,
                                     head, size,
                                     err);
          if (ret != TINYEXR_SUCCESS) return ret;
        }
    }
  } else {  // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
      size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
      sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    exr_image->images = tinyexr::AllocateImage(
      num_channels, exr_header->channels, exr_header->requested_pixel_types,
      data_width, data_height);

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    // Worker threads pull scanline-block indices from a shared atomic counter.
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {

#else

#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
      size_t y_idx = static_cast<size_t>(y);
      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // Scanline chunk layout:
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size =
          size_t(size - (offsets[y_idx] + sizeof(int) * 2));
        const unsigned char *data_ptr =
          reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(&line_no);
        tinyexr::swap4(&data_len);

        if (size_t(data_len) > data_size) {
          invalid_data = true;

        } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
          // Too large value. Assume this is invalid
          // 2**20 = 1048576 = heuristic value.
          invalid_data = true;
        } else if (data_len == 0) {
          // TODO(syoyo): May be ok to raise the threshold for example
          // `data_len < 4`
          invalid_data = true;
        } else {
          // line_no may be negative.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window.max_y + 1));

          int num_lines = end_line_no - line_no;

          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;

            // Adjust line_no with data_window.bmin.y

            // overflow check
            tinyexr_int64 lno =
              static_cast<tinyexr_int64>(line_no) -
              static_cast<tinyexr_int64>(exr_header->data_window.min_y);
            if (lno > std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else if (lno < -std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else {
              line_no -= exr_header->data_window.min_y;
            }

            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                    exr_image->images, exr_header->requested_pixel_types,
                    data_ptr, static_cast<size_t>(data_len),
                    exr_header->compression_type, exr_header->line_order,
                    data_width, data_height, data_width, y, line_no,
                    num_lines, static_cast<size_t>(pixel_data_size),
                    static_cast<size_t>(
                      exr_header->num_custom_attributes),
                    exr_header->custom_attributes,
                    static_cast<size_t>(exr_header->num_channels),
                    exr_header->channels, channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
  }

  if (invalid_data) {
    if (err) {
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}
// Rebuilds a scanline offset table by walking the chunk headers themselves
// (each chunk stores its y coordinate and payload length). Used when the
// stored offset table is incomplete. Returns false when a chunk header
// would read past `size`.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    // Byte-swap BEFORE validating: on big-endian hosts the little-endian
    // on-disk length must be converted first, otherwise the size check below
    // examines an unswapped (garbage) value.
    tinyexr::swap4(&y);
    tinyexr::swap4(&data_len);

    if (data_len >= size) {
      return false;
    }

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}
//
// For x > 0, FloorLog2(x) returns floor(log(x)/log(2)); returns 0 for x <= 1.
//
static int FloorLog2(unsigned x) {
  int result = 0;
  for (; x > 1; x >>= 1u) {
    ++result;
  }
  return result;
}
//
// For x > 0, CeilLog2(x) returns ceil(log(x)/log(2)); returns 0 for x <= 1.
//
static int CeilLog2(unsigned x) {
  int result = 0;
  int round_up = 0;  // becomes 1 when x is not an exact power of two
  while (x > 1) {
    if (x & 1) {
      round_up = 1;
    }
    ++result;
    x >>= 1u;
  }
  return result + round_up;
}
// log2 of x, rounded down or up according to the tile rounding mode.
static int RoundLog2(int x, int tile_rounding_mode) {
  const unsigned ux = static_cast<unsigned>(x);
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) {
    return FloorLog2(ux);
  }
  return CeilLog2(ux);
}
// Number of resolution levels along x implied by the header's tile level
// mode and data window.
static int CalculateNumXLevels(const EXRHeader* exr_header) {
  const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  int num = 0;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      num = 1;
      break;

    case TINYEXR_TILE_MIPMAP_LEVELS:
      // Mipmaps shrink both axes together; level count follows the larger axis.
      num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
      break;

    case TINYEXR_TILE_RIPMAP_LEVELS:
      // Ripmap x-levels depend only on the width.
      num = RoundLog2(w, exr_header->tile_rounding_mode) + 1;
      break;

    default:
      assert(false);
  }

  return num;
}
// Number of resolution levels along y implied by the header's tile level
// mode and data window.
static int CalculateNumYLevels(const EXRHeader* exr_header) {
  const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  int num = 0;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      num = 1;
      break;

    case TINYEXR_TILE_MIPMAP_LEVELS:
      // Mipmaps shrink both axes together; level count follows the larger axis.
      num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
      break;

    case TINYEXR_TILE_RIPMAP_LEVELS:
      // Ripmap y-levels depend only on the height.
      num = RoundLog2(h, exr_header->tile_rounding_mode) + 1;
      break;

    default:
      assert(false);
  }

  return num;
}
static void CalculateNumTiles(std::vector<int>& numTiles,
int toplevel_size,
int size,
int tile_rounding_mode) {
for (unsigned i = 0; i < numTiles.size(); i++) {
int l = LevelSize(toplevel_size, i, tile_rounding_mode);
assert(l <= std::numeric_limits<int>::max() - size + 1);
numTiles[i] = (l + size - 1) / size;
}
}
// Computes per-level tile counts along x and y from the header's data
// window, tile size, level mode and rounding mode.
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
                                 std::vector<int>& num_y_tiles,
                                 const EXRHeader* exr_header) {
  const int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // One entry per resolution level on each axis.
  num_x_tiles.resize(CalculateNumXLevels(exr_header));
  num_y_tiles.resize(CalculateNumYLevels(exr_header));

  CalculateNumTiles(num_x_tiles,
                    data_width,
                    exr_header->tile_size_x,
                    exr_header->tile_rounding_mode);
  CalculateNumTiles(num_y_tiles,
                    data_height,
                    exr_header->tile_size_y,
                    exr_header->tile_rounding_mode);
}
// Sets up the offset table for a non-tiled (single resolution) image:
// one level holding one row of `num_blocks` chunk offsets.
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
  offset_data.num_x_levels = 1;
  offset_data.num_y_levels = 1;
  offset_data.offsets.resize(1);
  offset_data.offsets[0].resize(1);
  offset_data.offsets[0][0].resize(num_blocks);
}
// Return sum of tile blocks.
// Shapes offset_data.offsets according to the header's tile level mode and
// the per-level tile counts, and returns the total number of tile chunks.
static int InitTileOffsets(OffsetData& offset_data,
                           const EXRHeader* exr_header,
                           const std::vector<int>& num_x_tiles,
                           const std::vector<int>& num_y_tiles) {
  int num_tile_blocks = 0;
  offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
  offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
    case TINYEXR_TILE_MIPMAP_LEVELS:
      // One-level and mipmap images store one table per level
      // (x and y level counts coincide).
      assert(offset_data.num_x_levels == offset_data.num_y_levels);
      offset_data.offsets.resize(offset_data.num_x_levels);
      for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
        offset_data.offsets[l].resize(num_y_tiles[l]);
        for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          offset_data.offsets[l][dy].resize(num_x_tiles[l]);
          num_tile_blocks += num_x_tiles[l];
        }
      }
      break;

    case TINYEXR_TILE_RIPMAP_LEVELS:
      // Ripmap images store a table for every (x level, y level) pair,
      // flattened row-major.
      offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));
      for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
        for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
          int l = ly * offset_data.num_x_levels + lx;
          offset_data.offsets[l].resize(num_y_tiles[ly]);
          for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
            offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
            num_tile_blocks += num_x_tiles[lx];
          }
        }
      }
      break;

    default:
      assert(false);
  }
  return num_tile_blocks;
}
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
static bool isValidTile(const EXRHeader* exr_header,
const OffsetData& offset_data,
int dx, int dy, int lx, int ly) {
if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false;
int num_x_levels = offset_data.num_x_levels;
int num_y_levels = offset_data.num_y_levels;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
if (lx == 0 &&
ly == 0 &&
offset_data.offsets.size() > 0 &&
offset_data.offsets[0].size() > static_cast<size_t>(dy) &&
offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) {
return true;
}
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
if (lx < num_x_levels &&
ly < num_y_levels &&
offset_data.offsets.size() > static_cast<size_t>(lx) &&
offset_data.offsets[lx].size() > static_cast<size_t>(dy) &&
offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) {
return true;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels);
if (lx < num_x_levels &&
ly < num_y_levels &&
(offset_data.offsets.size() > idx) &&
offset_data.offsets[idx].size() > static_cast<size_t>(dy) &&
offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) {
return true;
}
}
break;
default:
return false;
}
return false;
}
// Rebuilds the tile offset table by walking the tile chunk headers
// themselves (each chunk stores its tile/level coordinates and payload
// size). Used when the stored offset table contains invalid entries.
// Stops early — leaving the remaining entries untouched — on an invalid
// tile record or when a record would read or skip past `head + size`.
static void ReconstructTileOffsets(OffsetData& offset_data,
                                   const EXRHeader* exr_header,
                                   const unsigned char* head, const unsigned char* marker, const size_t size,
                                   bool isMultiPartFile,
                                   bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  const unsigned char* end = head + size;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = marker - head;

        // Each chunk starts with 4 ints (tile x/y, level x/y), preceded by a
        // part number in multi-part files. Bail out if that header would run
        // past the end of the input buffer.
        size_t chunk_header_bytes = sizeof(int) * (isMultiPartFile ? 5 : 4);
        if (chunk_header_bytes > size_t(end - marker)) return;

        if (isMultiPartFile) {
          //int partNumber;
          marker += sizeof(int);
        }

        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);

        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);

        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);

        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);

        if (isDeep) {
          if (2 * sizeof(tinyexr::tinyexr_int64) > size_t(end - marker)) return;

          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          // Reject negative sizes and skips that overrun the buffer before
          // advancing (corrupt files could previously walk `marker` out of
          // bounds unchecked).
          if (packed_offset_table_size < 0 || packed_sample_size < 0) return;
          // next Int64 is unpacked sample size - skip that too
          tinyexr::tinyexr_uint64 skip_bytes =
              tinyexr::tinyexr_uint64(packed_offset_table_size) +
              tinyexr::tinyexr_uint64(packed_sample_size) + 8;
          if (skip_bytes > tinyexr::tinyexr_uint64(end - marker)) return;
          marker += skip_bytes;
        } else {
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          // Same overrun guard for the plain (non-deep) payload skip.
          if (dataSize < 0 || size_t(dataSize) > size_t(end - marker)) return;
          marker += dataSize;
        }

        if (!isValidTile(exr_header, offset_data,
                         tileX, tileY, levelX, levelY))
          return;

        int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}
// Note: `marker` is passed by reference and is advanced past the offset
// table on success, so the caller can continue reading from there.
// Reads the chunk offset table at `marker` into offset_data, advancing
// `marker` past the table. Returns TINYEXR_SUCCESS, or
// TINYEXR_ERROR_INVALID_DATA when the table is truncated or an offset
// points outside the file.
static int ReadOffsets(OffsetData& offset_data,
                       const unsigned char* head,
                       const unsigned char*& marker,
                       const size_t size,
                       const char** err) {
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 offset;
        // Qualified as tinyexr::tinyexr_uint64 for consistency with every
        // other use in this function (same type either way).
        if ((marker + sizeof(tinyexr::tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }

        memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&offset);
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
        offset_data.offsets[l][dy][dx] = offset;
      }
    }
  }
  return TINYEXR_SUCCESS;
}
// Top-level image decoder: validates the header's data window and tile
// sizes, reads (or reconstructs) the chunk offset table, then delegates
// pixel decoding to DecodeChunk(). On decode failure the partially built
// image is freed and the error is forwarded through `err`.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Scanlines per chunk depends on the compression scheme.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_x - exr_header->data_window.min_x ==
          std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;

  if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
      exr_header->data_window.max_y - exr_header->data_window.min_y ==
          std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height. header invalid?
  {
    if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  if (exr_header->tiled) {
    if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  // Read offset tables.
  OffsetData offset_data;
  size_t num_blocks = 0;
  // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
  // If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
  if (exr_header->tiled) {
    {
      std::vector<int> num_x_tiles, num_y_tiles;
      PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
      num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
      if (exr_header->chunk_count > 0) {
        if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }

    int ret = ReadOffsets(offset_data, head, marker, size, err);
    if (ret != TINYEXR_SUCCESS) return ret;
    // Zero/negative entries mean an incomplete table; rebuild it by
    // scanning the tile chunk headers themselves.
    if (IsAnyOffsetsAreInvalid(offset_data)) {
      ReconstructTileOffsets(offset_data, exr_header,
                             head, marker, size,
                             exr_header->multipart, exr_header->non_image);
    }
  } else if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
    InitSingleResolutionOffsets(offset_data, num_blocks);
  } else {
    // No explicit chunk count: derive it from the image height and the
    // scanlines-per-chunk implied by the compression scheme.
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }

    InitSingleResolutionOffsets(offset_data, num_blocks);
  }

  if (!exr_header->tiled) {
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
    for (size_t y = 0; y < num_blocks; y++) {
      tinyexr::tinyexr_uint64 offset;
      // Issue #81
      if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
        tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
      tinyexr::swap8(&offset);
      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
      offsets[y] = offset;
    }

    // If line offsets are invalid, we try to reconstruct it.
    // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
    for (size_t y = 0; y < num_blocks; y++) {
      if (offsets[y] <= 0) {
        // TODO(syoyo) Report as warning?
        // if (err) {
        //  stringstream ss;
        //  ss << "Incomplete lineOffsets." << std::endl;
        //  (*err) += ss.str();
        //}
        bool ret =
            ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
        if (ret) {
          // OK
          break;
        } else {
          tinyexr::SetErrorMessage(
              "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

#if 1
      FreeEXRImage(exr_image);
#else
      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
#endif
    }

    return ret;
  }
}
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  // Collect the set of unique layer prefixes: everything before the last
  // '.' in each channel name. Channels with no '.', a leading '.', or a
  // trailing '.' carry no layer prefix and contribute nothing.
  layer_names.clear();

  for (int c = 0; c < exr_header.num_channels; c++) {
    const std::string channel(exr_header.channels[c].name);
    const size_t dot = channel.find_last_of('.');

    // The '.' must be an interior separator with a non-empty leaf name.
    if (dot == std::string::npos || dot == 0 || dot + 1 >= channel.size()) {
      continue;
    }

    const std::string layer = channel.substr(0, dot);
    const bool already_seen =
        std::find(layer_names.begin(), layer_names.end(), layer) !=
        layer_names.end();
    if (!already_seen) {
      layer_names.push_back(layer);
    }
  }
}
// Pairs a channel's position in EXRHeader::channels with its (possibly
// layer-stripped) name, as produced by ChannelsInLayer().
struct LayerChannel {
explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
size_t index; // index into EXRHeader::channels
std::string name; // channel name, e.g. "R" (layer prefix may be removed)
};
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string &layer_name,
                            std::vector<LayerChannel> &channels) {
  // Gather the channels belonging to `layer_name`. An empty layer name
  // selects every channel, with any layer prefix stripped down to the
  // leaf name after the last '.'.
  channels.clear();

  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string ch_name(exr_header.channels[c].name);

    if (layer_name.empty()) {
      // No layer requested: reduce "layer.sub.R" to "R".
      const size_t dot = ch_name.find_last_of('.');
      if (dot != std::string::npos) {
        ch_name = ch_name.substr(dot + 1);
      }
    } else {
      // Keep only channels whose full name contains "<layer_name>.".
      const size_t hit = ch_name.find(layer_name + '.');
      if (hit == std::string::npos) {
        continue;
      }
      if (hit == 0) {
        // Match is an exact prefix: drop "<layer_name>." from the name.
        ch_name = ch_name.substr(layer_name.size() + 1);
      }
    }

    channels.push_back(LayerChannel(size_t(c), ch_name));
  }
}
} // namespace tinyexr
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
// Loads a single-part EXR file as interleaved RGBA float data.
// Convenience wrapper: identical to LoadEXRWithLayer() with no layer
// selected (all channels considered, layer prefixes stripped).
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename,
/* layername */ NULL, err);
}
// Loads the RGBA channels of layer `layername` from an EXR file into a
// malloc()ed width*height*4 float buffer stored in *out_rgba (caller
// frees it). HALF channels are requested as FLOAT before decoding, so the
// per-channel image data read below is always float. If the layer resolves
// to a single channel it is replicated across all four RGBA components;
// otherwise R, G and B must exist and A defaults to 1.0 when absent.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in `err`).
// NOTE(review): `width`/`height` are dereferenced without a NULL check —
// callers must pass valid pointers; malloc results are also unchecked.
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layername,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
// Stage 1: version sniff. Multipart/deep files are rejected up front.
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to open EXR file or read version info from EXR file. code("
<< ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
// Stage 2: header parse (allocates attribute/channel storage).
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
// TODO: Probably limit loading to layers (channels) selected by layer index
// Stage 3: decode all pixel data.
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Stage 4: locate the R/G/B/A channel indices within the chosen layer.
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
std::vector<std::string> layer_names;
tinyexr::GetLayers(exr_header, layer_names);
std::vector<tinyexr::LayerChannel> channels;
tinyexr::ChannelsInLayer(
exr_header, layername == NULL ? "" : std::string(layername), channels);
if (channels.size() < 1) {
tinyexr::SetErrorMessage("Layer Not Found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_LAYER_NOT_FOUND;
}
// Only the first four channels of the layer are considered for RGBA.
size_t ch_count = channels.size() < 4 ? channels.size() : 4;
for (size_t c = 0; c < ch_count; c++) {
const tinyexr::LayerChannel &ch = channels[c];
if (ch.name == "R") {
idxR = int(ch.index);
} else if (ch.name == "G") {
idxG = int(ch.index);
} else if (ch.name == "B") {
idxB = int(ch.index);
} else if (ch.name == "A") {
idxA = int(ch.index);
}
}
if (channels.size() == 1) {
int chIdx = int(channels.front().index);
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
// Copy tile by tile; edge tiles may extend past the image and are
// clipped by the range checks below.
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii = exr_image.tiles[it].offset_x *
static_cast<int>(exr_header.tile_size_x) +
i;
const int jj = exr_image.tiles[it].offset_y *
static_cast<int>(exr_header.tile_size_y) +
j;
const int idx = ii + jj * static_cast<int>(exr_image.width);
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
}
}
}
} else {
// Scanline image: replicate the single channel into all RGBA slots.
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val =
reinterpret_cast<float **>(exr_image.images)[chIdx][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
// Tiled path: same clipping scheme as the grayscale branch above.
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
// Scanline path: interleave planar R/G/B(/A) into RGBA.
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
// Parses the EXR header located in `memory` (which must start with the
// magic number + version words) into `exr_header`.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; a human-readable
// message is stored via `err` when provided.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  // `version` is dereferenced at the end, so validate it alongside the
  // other pointers (the original only checked `memory`/`exr_header`).
  if (memory == NULL || exr_header == NULL || version == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory`, `exr_header` or `version` argument is "
        "null in ParseEXRHeaderFromMemory()",
        err);
    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Header attributes start right after the magic number + version words.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  int ret;
  {
    std::string err_str;
    ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

    if (ret != TINYEXR_SUCCESS) {
      if (err && !err_str.empty()) {
        tinyexr::SetErrorMessage(err_str, err);
      }
      // Bail out now: `info` is incomplete, and running ConvertHeader() on
      // it (as the previous code did) works on bogus data and can mask or
      // overwrite this parse error.
      return ret;
    }
  }

  {
    std::string warn;
    std::string err_str;

    if (!ConvertHeader(exr_header, info, &warn, &err_str)) {
      // TODO(syoyo): Report warning message(s)
      if (err && !err_str.empty()) {
        tinyexr::SetErrorMessage(err_str, err);
      }
      ret = TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  exr_header->multipart = version->multipart ? 1 : 0;
  exr_header->non_image = version->non_image ? 1 : 0;

  return ret;
}
// Loads an EXR image from a memory buffer as interleaved RGBA float data.
//
// `*out_rgba` receives a malloc()ed width*height*4 float array (caller
// frees). HALF channels are requested as FLOAT before decoding. A
// single-channel image is replicated into all four RGBA components;
// otherwise R, G and B channels are required and A defaults to 1.0 when
// absent. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
//
// Fixes over the previous version: `exr_header` / `exr_image` are now
// freed on every error path (they used to leak — see the old
// "@todo { free exr_image }" markers).
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    std::stringstream ss;
    ss << "Failed to parse EXR version. code(" << ret << ")";
    tinyexr::SetErrorMessage(ss.str(), err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // Header attributes may have been partially allocated.
    FreeEXRHeader(&exr_header);
    return ret;
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }

  // RGBA channel lookup by exact name match.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only: replicate channel 0 into RGBA.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      // Edge tiles may extend past the image; clip via the range checks.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // TODO(syoyo): Support non RGBA image.
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and decodes it via
// LoadEXRImageFromMemory(). `exr_header` must already be parsed.
//
// Fixes over the previous version: the FILE* is no longer leaked on the
// "file too short" path, ftell() failure is detected instead of being
// cast to a huge size_t, and a short fread() is now a hard error rather
// than an assert() that vanishes under NDEBUG.
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute file size.
  fseek(fp, 0, SEEK_END);
  long ftell_ret = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (ftell_ret < 0) {
    // ftell() failed (e.g. unseekable stream); previously this wrapped to
    // a huge size_t and triggered a giant allocation.
    fclose(fp);
    tinyexr::SetErrorMessage("Cannot get size of file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(ftell_ret);

  if (filesize < 16) {
    fclose(fp);  // was leaked here before
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decodes EXR pixel data from a memory buffer whose header was already
// parsed into `exr_header` (header_len must be set by the parse step).
//
// Fix over the previous version: `header_len` is now validated against
// `size`, so a corrupt/hostile header length can no longer produce a
// chunk-data pointer past the end of `memory`.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // The chunk data starts after the 8-byte magic+version words plus the
  // header; reject headers whose recorded length would point past `size`
  // (compute in 64 bits to avoid overflow on 32-bit size_t).
  if (static_cast<tinyexr::tinyexr_uint64>(exr_header->header_len) + 8ull >
      static_cast<tinyexr::tinyexr_uint64>(size)) {
    tinyexr::SetErrorMessage("Invalid header length.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
// Serializes one scanline block (or one tile) of planar image data into the
// EXR on-disk layout and appends it to `out_data` (which the caller has
// pre-sized with the chunk header bytes). Per line, channels are laid out
// back-to-back ("channel-interleaved by scanline"); each sample is
// converted to the channel's requested pixel type and byte-swapped to the
// file's little-endian order, then the whole buffer is compressed with
// `compression_type`. Returns false only for an unknown compression type
// (compile-time-disabled codecs hit assert(0) instead).
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
const unsigned char* const* images,
int compression_type,
int /*line_order*/,
int width, // for tiled : tile.width
int /*height*/, // for tiled : header.tile_size_y
int x_stride, // for tiled : header.tile_size_x
int line_no, // for tiled : 0
int num_lines, // for tiled : tile.height
size_t pixel_data_size,
const std::vector<ChannelInfo>& channels,
const std::vector<size_t>& channel_offset_list,
const void* compression_param = 0) // zfp compression param
{
// Staging buffer for the uncompressed, already byte-swapped block.
size_t buf_size = static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
static_cast<size_t>(pixel_data_size);
//int last2bit = (buf_size & 3);
// buf_size must be multiple of four
//if(last2bit) buf_size += 4 - last2bit;
std::vector<unsigned char> buf(buf_size);
size_t start_y = static_cast<size_t>(line_no);
// Convert each channel from its in-memory type to its requested on-disk
// type. `channel_offset_list[c]` is the per-scanline byte offset of
// channel c within a line of the staging buffer.
for (size_t c = 0; c < channels.size(); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// HALF source stored as FLOAT: widen, then swap to file order.
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<const unsigned short * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(&f32.f);
// line_ptr[x] = f32.f;
// cpy4 avoids a potentially misaligned float store.
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
// HALF source stored as HALF: swap bytes only.
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
unsigned short val = reinterpret_cast<const unsigned short * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
// HALF -> UINT is not a supported conversion.
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
// FLOAT source stored as HALF: narrow, then swap.
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<const float * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// FLOAT source stored as FLOAT: swap bytes only.
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
float val = reinterpret_cast<const float * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
// FLOAT -> UINT is not a supported conversion.
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// UINT channels are always stored as UINT: swap bytes only.
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
unsigned int val = reinterpret_cast<const unsigned int * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
// Compress (or copy) the staged block and append to out_data.
if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
out_data.insert(out_data.end(), buf.begin(), buf.end());
} else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, width, num_lines);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = outSize;
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
assert(0);
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
// compression_param must point to a ZFPCompressionParam here.
const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = outSize;
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
(void)compression_param;
assert(0);
#endif
} else {
assert(0);
return false;
}
return true;
}
// Encodes every tile of one mip/rip level into `data_list` entries
// [start_index, start_index + num_x_tiles*num_y_tiles). Each entry is a
// complete tile chunk: a 5-int header (tileX, tileY, levelX, levelY,
// data size — written big-to-file-endian via swap4) followed by the
// compressed pixel data from EncodePixelData(). Tiles are processed in
// parallel with std::thread or OpenMP when available. Returns
// TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA.
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
const std::vector<tinyexr::ChannelInfo>& channels,
std::vector<std::vector<unsigned char> >& data_list,
size_t start_index, // for data_list
int num_x_tiles, int num_y_tiles,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const void* compression_param, // must be set if zfp compression is enabled
std::string* err) {
int num_tiles = num_x_tiles * num_y_tiles;
assert(num_tiles == level_image->num_tiles);
// The base (level 0) image must be at least one tile large.
if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
level_image->level_x == 0 && level_image->level_y == 0) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Shared failure flag; atomic when worker threads are used.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
// NOTE: the loop below opens in one preprocessor branch and closes in the
// matching branch after the body — the braces intentionally span the #if.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = tile_count++) < num_tiles) {
#else
// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_tiles; i++) {
#endif
size_t tile_idx = static_cast<size_t>(i);
size_t data_idx = tile_idx + start_index;
// Tiles are numbered row-major within the level.
int x_tile = i % num_x_tiles;
int y_tile = i / num_x_tiles;
EXRTile& tile = level_image->tiles[tile_idx];
const unsigned char* const* images =
static_cast<const unsigned char* const*>(tile.images);
// Reserve the 5-int tile chunk header; pixel data is appended after it.
data_list[data_idx].resize(5*sizeof(int));
size_t data_header_size = data_list[data_idx].size();
bool ret = EncodePixelData(data_list[data_idx],
images,
exr_header->compression_type,
0, // increasing y
tile.width,
exr_header->tile_size_y,
exr_header->tile_size_x,
0,
tile.height,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue;
}
assert(data_list[data_idx].size() > data_header_size);
int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
//tileX, tileY, levelX, levelY // pixel_data_size(int)
memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
// Convert the header fields to the file's byte order.
swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
return TINYEXR_SUCCESS;
}
// Scanlines per chunk for a given compression scheme: ZIP and ZFP pack 16
// lines, PIZ packs 32; everything else (NONE, RLE, ZIPS) is one line.
static int NumScanlines(int compression_type) {
  switch (compression_type) {
    case TINYEXR_COMPRESSIONTYPE_ZIP:
    case TINYEXR_COMPRESSIONTYPE_ZFP:
      return 16;
    case TINYEXR_COMPRESSIONTYPE_PIZ:
      return 32;
    default:
      return 1;
  }
}
// Encodes all chunks (tiles or scanline blocks) of one image part into
// `data_list`, and records each chunk's absolute file offset (already
// byte-swapped to file order) in `offset_data`. `chunk_offset` is where the
// first chunk will land in the file; `total_size` receives the offset just
// past the last chunk. In multipart files each chunk is preceded by an
// extra 4-byte field, accounted for via `doffset`. Returns TINYEXR_SUCCESS
// or TINYEXR_ERROR_INVALID_DATA.
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
const std::vector<ChannelInfo>& channels,
int num_blocks,
tinyexr_uint64 chunk_offset, // starting offset of current chunk
bool is_multipart,
OffsetData& offset_data, // output block offsets, must be initialized
std::vector<std::vector<unsigned char> >& data_list, // output
tinyexr_uint64& total_size, // output: ending offset of current chunk
std::string* err) {
int num_scanlines = NumScanlines(exr_header->compression_type);
data_list.resize(num_blocks);
// Byte offset of each channel within one scanline of a staging buffer,
// plus the total bytes per pixel across all channels.
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
{
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (channels[c].requested_pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
}
const void* compression_param = 0;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
std::string e;
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes, &e);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
compression_param = &zfp_compression_param;
}
#endif
tinyexr_uint64 offset = chunk_offset;
// Extra 4 bytes per chunk in multipart files (per-chunk part field).
tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
if (exr_image->tiles) {
// Tiled path: walk the level chain, encoding each level's tiles.
const EXRImage* level_image = exr_image;
size_t block_idx = 0;
tinyexr::tinyexr_uint64 block_data_size = 0;
int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
if (!level_image) {
if (err) {
(*err) += "Invalid number of tiled levels for EncodeChunk\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// The level chain must be ordered exactly as the offset table expects.
int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
exr_header->tile_level_mode, offset_data.num_x_levels);
if (level_index_from_image != level_index) {
if (err) {
(*err) += "Incorrect level ordering in tiled image\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
std::string e;
int ret = EncodeTiledLevel(level_image,
exr_header,
channels,
data_list,
block_idx,
num_x_tiles,
num_y_tiles,
channel_offset_list,
pixel_data_size,
compression_param,
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty() && err) {
(*err) += e;
}
return ret;
}
// Assign running file offsets to this level's tiles (row-major),
// storing them pre-swapped to file byte order.
for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
offset_data.offsets[level_index][j][i] = offset;
swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
offset += data_list[block_idx].size() + doffset;
block_data_size += data_list[block_idx].size();
++block_idx;
}
level_image = level_image->next_level;
}
assert(static_cast<int>(block_idx) == num_blocks);
total_size = offset;
} else { // scanlines
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
// NOTE: as in EncodeTiledLevel, the loop braces below intentionally span
// the threaded/OpenMP preprocessor branches.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
std::vector<std::thread> workers;
std::atomic<int> block_count(0);
int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = block_count++) < num_blocks) {
#else
bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
#endif
// Each block covers `num_scanlines` lines, clamped at the image bottom.
int start_y = num_scanlines * i;
int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
int num_lines = end_Y - start_y;
const unsigned char* const* images =
static_cast<const unsigned char* const*>(exr_image->images);
// Reserve the 2-int scanline chunk header (y coordinate, data size).
data_list[i].resize(2*sizeof(int));
size_t data_header_size = data_list[i].size();
bool ret = EncodePixelData(data_list[i],
images,
exr_header->compression_type,
0, // increasing y
exr_image->width,
exr_image->height,
exr_image->width,
start_y,
num_lines,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue; // "break" cannot be used with OpenMP
}
assert(data_list[i].size() > data_header_size);
int data_len = static_cast<int>(data_list[i].size() - data_header_size);
memcpy(&data_list[i][0], &start_y, sizeof(int));
memcpy(&data_list[i][4], &data_len, sizeof(int));
// Convert the header fields to the file's byte order.
swap4(reinterpret_cast<int*>(&data_list[i][0]));
swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode scanline data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Assign running file offsets to the scanline blocks (pre-swapped).
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size() + doffset;
}
total_size = static_cast<size_t>(offset);
}
return TINYEXR_SUCCESS;
}
// can save a single or multi-part image (no deep* formats)
//
// Serializes `num_parts` images/headers into a freshly malloc()ed EXR file
// image. Returns the byte size of the buffer stored in *memory_out, or 0 on
// error (a message is reported through `err` when non-NULL). The caller owns
// *memory_out and must free() it.
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
                                        const EXRHeader** exr_headers,
                                        unsigned int num_parts,
                                        unsigned char** memory_out, const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory_out == NULL) {
    SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                    err);
    return 0;
  }

  // Validate the compression settings of every part before emitting anything.
  {
    for (unsigned int i = 0; i < num_parts; ++i) {
      if (exr_headers[i]->compression_type < 0) {
        SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                        err);
        return 0;
      }
#if !TINYEXR_USE_PIZ
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
        SetErrorMessage("PIZ compression is not supported in this build",
                        err);
        return 0;
      }
#endif
#if !TINYEXR_USE_ZFP
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        SetErrorMessage("ZFP compression is not supported in this build",
                        err);
        return 0;
      }
#else
      // BUGFIX: this loop previously read `exr_header->num_channels`, an
      // identifier that does not exist in this function, breaking the build
      // whenever TINYEXR_USE_ZFP was enabled.
      for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
        if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
          SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                          err);
          return 0;
        }
      }
#endif
    }
  }

  std::vector<unsigned char> memory;

  // Magic number.
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    memory.insert(memory.end(), header, header + 4);
  }

  // Version field.
  // using value from the first header
  int long_name = exr_headers[0]->long_name;
  {
    char marker[] = { 2, 0, 0, 0 };
    /* @todo
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    */
    // tiled
    if (num_parts == 1 && exr_images[0].tiles) {
      marker[1] |= 0x2;
    }
    // long_name
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Count chunks (scanline blocks or tiles) per part and prepare the
  // offset tables that will later be filled by EncodeChunk.
  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] =
            InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }

  // Write attributes to memory buffer.
  std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  {
    std::set<std::string> partnames;
    for (unsigned int i = 0; i < num_parts; ++i) {
      // channels
      {
        std::vector<unsigned char> data;
        for (int c = 0; c < exr_headers[i]->num_channels; c++) {
          tinyexr::ChannelInfo info;
          info.p_linear = 0;
          info.pixel_type = exr_headers[i]->pixel_types[c];
          info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c];
          info.x_sampling = 1;
          info.y_sampling = 1;
          info.name = std::string(exr_headers[i]->channels[c].name);
          channels[i].push_back(info);
        }
        tinyexr::WriteChannelInfo(data, channels[i]);
        tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                        static_cast<int>(data.size()));
      }
      {
        int comp = exr_headers[i]->compression_type;
        swap4(&comp);
        WriteAttributeToMemory(
            &memory, "compression", "compression",
            reinterpret_cast<const unsigned char*>(&comp), 1);
      }
      {
        int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
        swap4(&data[0]);
        swap4(&data[1]);
        swap4(&data[2]);
        swap4(&data[3]);
        WriteAttributeToMemory(
            &memory, "dataWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
        int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
        swap4(&data0[0]);
        swap4(&data0[1]);
        swap4(&data0[2]);
        swap4(&data0[3]);
        // Note: must be the same across parts (currently, using value from the first header)
        WriteAttributeToMemory(
            &memory, "displayWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
      }
      {
        unsigned char line_order = 0;  // @fixme { read line_order from EXRHeader }
        WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                               &line_order, 1);
      }
      {
        // Note: must be the same across parts
        float aspectRatio = 1.0f;
        swap4(&aspectRatio);
        WriteAttributeToMemory(
            &memory, "pixelAspectRatio", "float",
            reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
      }
      {
        float center[2] = { 0.0f, 0.0f };
        // BUGFIX: `&center[...]` had been mangled into a non-ASCII character
        // sequence (`¢er[...]`), which does not compile.
        swap4(&center[0]);
        swap4(&center[1]);
        WriteAttributeToMemory(
            &memory, "screenWindowCenter", "v2f",
            reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
      }
      {
        float w = 1.0f;
        swap4(&w);
        WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                               reinterpret_cast<const unsigned char*>(&w),
                               sizeof(float));
      }
      if (exr_images[i].tiles) {
        unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
        if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
        // tiledesc layout: xSize(4) ySize(4) mode(1) = 9 bytes total.
        unsigned int datai[3] = { 0, 0, 0 };
        unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
        datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
        datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
        data[8] = tile_mode;
        swap4(reinterpret_cast<unsigned int*>(&data[0]));
        swap4(reinterpret_cast<unsigned int*>(&data[4]));
        WriteAttributeToMemory(
            &memory, "tiles", "tiledesc",
            reinterpret_cast<const unsigned char*>(data), 9);
      }
      // must be present for multi-part files - according to spec.
      if (num_parts > 1) {
        // name
        {
          size_t len = 0;
          if ((len = strlen(exr_headers[i]->name)) > 0) {
            // The std::set detects duplicate part names across the loop.
            partnames.emplace(exr_headers[i]->name);
            if (partnames.size() != i + 1) {
              SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
              return 0;
            }
            WriteAttributeToMemory(
                &memory, "name", "string",
                reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
                static_cast<int>(len));
          } else {
            SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
            return 0;
          }
        }
        // type
        {
          const char* type = "scanlineimage";
          if (exr_images[i].tiles) type = "tiledimage";
          WriteAttributeToMemory(
              &memory, "type", "string",
              reinterpret_cast<const unsigned char*>(type),
              static_cast<int>(strlen(type)));
        }
        // chunkCount
        {
          WriteAttributeToMemory(
              &memory, "chunkCount", "int",
              reinterpret_cast<const unsigned char*>(&chunk_count[i]),
              4);
        }
      }
      // Custom attributes
      if (exr_headers[i]->num_custom_attributes > 0) {
        for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
          tinyexr::WriteAttributeToMemory(
              &memory, exr_headers[i]->custom_attributes[j].name,
              exr_headers[i]->custom_attributes[j].type,
              reinterpret_cast<const unsigned char*>(
                  exr_headers[i]->custom_attributes[j].value),
              exr_headers[i]->custom_attributes[j].size);
        }
      }
      {  // end of header
        memory.push_back(0);
      }
    }
  }
  if (num_parts > 1) {
    // end of header list
    memory.push_back(0);
  }

  // The first chunk starts right after the headers and all offset tables.
  tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
  tinyexr_uint64 total_size = 0;
  std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    std::string e;
    int ret = EncodeChunk(&exr_images[i], exr_headers[i],
                          channels[i],
                          chunk_count[i],
                          // starting offset of current chunk after part-number
                          chunk_offset,
                          num_parts > 1,
                          offset_data[i],  // output: block offsets, must be initialized
                          data_lists[i],   // output
                          total_size,      // output
                          &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return 0;
    }
    chunk_offset = total_size;
  }

  // Allocating required memory
  if (total_size == 0) {  // something went wrong
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }
  (*memory_out) = static_cast<unsigned char*>(malloc(total_size));
  // BUGFIX: allocation failure was not checked before the memcpy below.
  if ((*memory_out) == NULL) {
    tinyexr::SetErrorMessage("Failed to allocate memory for EXR data", err);
    return 0;
  }

  // Writing header
  memcpy((*memory_out), &memory[0], memory.size());
  unsigned char* memory_ptr = *memory_out + memory.size();
  size_t sum = memory.size();

  // Writing offset data for chunks
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (exr_images[i].tiles) {
      const EXRImage* level_image = &exr_images[i];
      int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
          offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
      for (int level_index = 0; level_index < num_levels; ++level_index) {
        for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
          size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
          sum += num_bytes;
          assert(sum <= total_size);
          memcpy(memory_ptr,
                 reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
                 num_bytes);
          memory_ptr += num_bytes;
        }
        level_image = level_image->next_level;
      }
    } else {
      size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
      sum += num_bytes;
      assert(sum <= total_size);
      std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
      memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
      memory_ptr += num_bytes;
    }
  }

  // Writing chunk data
  for (unsigned int i = 0; i < num_parts; ++i) {
    for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
      if (num_parts > 1) {
        // Multi-part chunks are prefixed with their (little-endian) part number.
        sum += 4;
        assert(sum <= total_size);
        unsigned int part_number = i;
        swap4(&part_number);
        memcpy(memory_ptr, &part_number, 4);
        memory_ptr += 4;
      }
      sum += data_lists[i][j].size();
      assert(sum <= total_size);
      memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
      memory_ptr += data_lists[i][j].size();
    }
  }
  assert(sum == total_size);
  return total_size;  // OK
}
} // tinyexr
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
                            const EXRHeader* exr_header,
                            unsigned char** memory_out, const char** err) {
  // Single-part convenience wrapper: delegate to the N-part writer with a
  // one-element header list.
  const EXRHeader* headers[1] = { exr_header };
  return tinyexr::SaveEXRNPartImageToMemory(exr_image, headers, 1, memory_out, err);
}
// Serializes `exr_image` with `exr_header` and writes the result to
// `filename`. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; a
// human-readable message is reported through `err` when non-NULL.
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  // BUGFIX: `exr_header` was dereferenced (compression_type) before any NULL
  // check; reject NULL explicitly.
  if (exr_image == NULL || exr_header == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    // BUGFIX: `fp` leaked on this early-return path.
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
                                     const EXRHeader** exr_headers,
                                     unsigned int num_parts,
                                     unsigned char** memory_out, const char** err) {
  // A multi-part file needs at least two parts; everything else is invalid.
  const bool args_ok = (exr_images != NULL) && (exr_headers != NULL) &&
                       (num_parts >= 2) && (memory_out != NULL);
  if (!args_ok) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                             err);
    return 0;
  }
  return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err);
}
// Serializes a multi-part (>= 2 parts) EXR image and writes it to `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
                                const EXRHeader** exr_headers,
                                unsigned int num_parts,
                                const char* filename,
                                const char** err) {
  // BUGFIX: `filename` was not validated before being passed to fopen/
  // UTF8ToWchar below.
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
      filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err);
  if (mem_size == 0) {
    // BUGFIX: `fp` leaked on this early-return path.
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
// Loads a deep (ZIP/ZIPS-compressed, scanline) EXR file into `deep_image`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code. The caller owns the
// allocated arrays inside `deep_image`.
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
  if (deep_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#ifdef _WIN32
  FILE *fp = NULL;
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];
  const char *marker = &buf[0];

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }
    marker += 4;
  }

  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1;  // 16 for ZIP compression.
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;

  // Read attributes
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // '\0' terminates the attribute list.
      marker++;
      size--;
      break;
    }
    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      std::stringstream ss;
      ss << "Failed to parse attribute\n";
      tinyexr::SetErrorMessage(ss.str(), err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;
    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        std::stringstream ss;
        ss << "Unsupported compression type : " << compression_type;
        tinyexr::SetErrorMessage(ss.str(), err);
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }
      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }
    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!tinyexr::ReadChannelInfo(channels, data)) {
        tinyexr::SetErrorMessage("Failed to parse channel info", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      num_channels = static_cast<int>(channels.size());
      if (num_channels < 1) {
        tinyexr::SetErrorMessage("Invalid channels format", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    } else if (attr_name.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      tinyexr::swap4(&dx);
      tinyexr::swap4(&dy);
      tinyexr::swap4(&dw);
      tinyexr::swap4(&dh);
    } else if (attr_name.compare("displayWindow") == 0) {
      // Parsed but currently unused.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      tinyexr::swap4(&x);
      tinyexr::swap4(&y);
      tinyexr::swap4(&w);
      tinyexr::swap4(&h);
    }
  }
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);

  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;

  // Read offset tables.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;
  }
  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64);  // = 8
    offsets[y] = offset;
  }
#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }

  // Pixel data is stored as image[channel][scanline_row][sample]; the rows
  // themselves are allocated inside the per-block loop below.
  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
  }
  deep_image->offset_table = static_cast<int **>(
      malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
        malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);
    // Deep scanline block layout:
    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap4(&line_no);
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));

    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
    // decode pixel offset table.
    {
      unsigned long dstLen =
          static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
              &dstLen, data_ptr + 28,
              static_cast<unsigned long>(packedOffsetTableSize))) {
        // BUGFIX: was `return false;`, which converts to 0 ==
        // TINYEXR_SUCCESS and silently reported success on corrupt data.
        return TINYEXR_ERROR_INVALID_DATA;
      }
      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sample_data(
        static_cast<size_t>(unpackedSampleDataSize));
    // decode sample data.
    {
      unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
                reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
                data_ptr + 28 + packedOffsetTableSize,
                static_cast<unsigned long>(packedSampleDataSize))) {
          // BUGFIX: same false-as-success bug as above.
          return TINYEXR_ERROR_INVALID_DATA;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }

    // decode sample
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      // Byte offset of each channel within one interleaved sample.
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) {  // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) {  // half
          channel_offset += 2;
        } else if (channels[i].pixel_type ==
                   TINYEXR_PIXELTYPE_FLOAT) {  // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);
    assert(static_cast<size_t>(
               pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
               sampleSize) == sample_data.size());
    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;

    //
    // Alloc memory
    //
    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
            malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
        if (channels[c].pixel_type == 0) {  // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui);  // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) {  // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y

  deep_image->width = data_width;
  deep_image->height = data_height;
  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;
  return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
  // Reset every field to its empty/default state; NULL input is a no-op.
  if (!exr_image) {
    return;
  }
  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;
  exr_image->num_tiles = 0;
  exr_image->level_x = 0;
  exr_image->level_y = 0;
  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->next_level = NULL;
}
// Releases an error message allocated by the tinyexr API.
void FreeEXRErrorMessage(const char *msg) {
  // free(NULL) is a well-defined no-op, so no guard is needed; the redundant
  // `if (msg)` check and trailing `return;` were removed.
  free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
void InitEXRHeader(EXRHeader *exr_header) {
  // Zero the whole struct: pointers become NULL, counts become 0.
  // NULL input is a no-op.
  if (exr_header != NULL) {
    memset(exr_header, 0, sizeof(EXRHeader));
  }
}
// Frees all heap memory owned by `exr_header` (the struct itself is not
// freed). Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_ARGUMENT on NULL.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // free(NULL) is a no-op, so the per-pointer `if` guards were redundant.
  free(exr_header->channels);
  free(exr_header->pixel_types);
  free(exr_header->requested_pixel_types);
  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    free(exr_header->custom_attributes[i].value);
  }
  free(exr_header->custom_attributes);
  // Clears the embedded fixed-size `name` buffer.
  EXRSetNameAttr(exr_header, NULL);
  return TINYEXR_SUCCESS;
}
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  // Stores up to 255 bytes of `name` into the header's fixed 256-byte,
  // always NUL-terminated `name` field. NULL name just clears the field.
  if (exr_header == NULL) {
    return;
  }
  memset(exr_header->name, 0, 256);
  if (name == NULL) {
    return;
  }
  const size_t n = std::min(strlen(name), (size_t)255);
  if (n > 0) {
    memcpy(exr_header->name, name, n);
  }
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if(exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while((level_image = level_image->next_level)) ++levels;
return levels;
}
int FreeEXRImage(EXRImage *exr_image) {
  // Frees all pixel storage owned by `exr_image` (scanline planes, tiles,
  // and any chained mip/rip levels). The struct itself is not freed, but
  // chained levels allocated with `new` are deleted.
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // Release coarser levels first.
  if (exr_image->next_level) {
    FreeEXRImage(exr_image->next_level);
    delete exr_image->next_level;
  }
  // Scanline storage: one plane per channel.
  if (exr_image->images) {
    for (int c = 0; c < exr_image->num_channels; c++) {
      free(exr_image->images[c]);  // free(NULL) is a no-op
    }
    free(exr_image->images);
  }
  // Tiled storage: per-tile channel planes.
  if (exr_image->tiles) {
    for (int t = 0; t < exr_image->num_tiles; t++) {
      if (exr_image->tiles[t].images) {
        for (int c = 0; c < exr_image->num_channels; c++) {
          free(exr_image->tiles[t].images[c]);
        }
        free(exr_image->tiles[t].images);
      }
    }
    free(exr_image->tiles);
  }
  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and parses its (single-part) EXR header.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  // BUGFIX: a zero-byte file previously reached `buf.at(0)` below and threw
  // std::out_of_range instead of returning an error code.
  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
// Parses the headers of a multi-part EXR file from memory. On success,
// *exr_headers receives a malloc()ed array of malloc()ed EXRHeader pointers
// (caller frees each with FreeEXRHeader + free, then frees the array) and
// *num_headers receives its length.
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;
  std::vector<tinyexr::HeaderInfo> infos;
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();
    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }
    if (empty_header) {
      marker += 1;  // skip '\0' that terminates the header list
      break;
    }
    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    infos.push_back(info);
    // BUGFIX: the loop previously decremented the unrelated `size` variable
    // while continuing to pass the never-shrinking `marker_size` to
    // ParseEXRHeader, allowing reads past the logical end of the buffer.
    if (info.header_len > marker_size) {
      tinyexr::SetErrorMessage("Invalid header length.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    // move to next header.
    marker += info.header_len;
    marker_size -= info.header_len;
  }
  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  // BUGFIX: allocation results were previously used unchecked.
  if ((*exr_headers) == NULL) {
    tinyexr::SetErrorMessage("Failed to allocate memory for EXR headers", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int retcode = TINYEXR_SUCCESS;
  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
    if (exr_header == NULL) {
      tinyexr::SetErrorMessage("Failed to allocate memory for EXR header", err);
      (*num_headers) = static_cast<int>(i);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    memset(exr_header, 0, sizeof(EXRHeader));
    std::string warn;
    std::string _err;
    if (!ConvertHeader(exr_header, infos[i], &warn, &_err)) {
      if (!_err.empty()) {
        tinyexr::SetErrorMessage(
            _err, err);
      }
      // continue to converting headers
      retcode = TINYEXR_ERROR_INVALID_HEADER;
    }
    exr_header->multipart = exr_version->multipart ? 1 : 0;
    (*exr_headers)[i] = exr_header;
  }
  (*num_headers) = static_cast<int>(infos.size());
  return retcode;
}
// File-based wrapper over ParseEXRMultipartHeaderFromMemory: reads the whole
// file into a buffer and forwards it.
// Returns TINYEXR_SUCCESS, or an error code with a message in *err.
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute size.
  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  // Fix: guard against ftell() failure (-1) and files too small to contain
  // even the version header. The original cast -1 to size_t (a huge
  // allocation) and took &buf[0] of a possibly empty vector (UB).
  if (fsize < static_cast<long>(tinyexr::kEXRVersionSize)) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(fsize);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
// Parses the 8-byte EXR version header (magic number + version/flags) at the
// start of `memory` into `version`.
// Returns TINYEXR_SUCCESS on success, or a TINYEXR_ERROR_* code.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Magic number check: 0x76 0x2f 0x31 0x01 identifies an OpenEXR file.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse version header.
  {
    // Version number must be 2 (OpenEXR 2.x file format).
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }

    // (the redundant `version == NULL` re-check that was here was dead code;
    // the argument is validated at function entry.)
    version->version = 2;

    // Flag bits of the version field (see OpenEXR file layout spec).
    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {         // 11th bit
      version->non_image = true;   // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}
// Reads the first kEXRVersionSize bytes of `filename` and parses them with
// ParseEXRVersionFromMemory. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_*.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (err != 0) {
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    // Fix: the original returned here without closing `fp` (FILE* leak).
    fclose(fp);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
// Decodes every part of a multipart EXR image stored in `memory`.
// `exr_headers` must hold `num_parts` headers previously parsed by
// ParseEXRMultipartHeaderFromMemory (each header_len must be set), and
// exr_images must be an array of `num_parts` EXRImage slots.
// Returns TINYEXR_SUCCESS, or an error code with a message in *err.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  // Offset tables begin right after magic+version and all part headers.
  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+     : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table (one OffsetData per part).
  std::vector<tinyexr::OffsetData> chunk_offset_table_list;
  chunk_offset_table_list.reserve(num_parts);
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
    tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
    if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
      // Scanline part, or single-level tiled part: one flat offset table.
      tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
      std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];

      for (size_t c = 0; c < offset_table.size(); c++) {
        tinyexr::tinyexr_uint64 offset;
        memcpy(&offset, marker, 8);
        tinyexr::swap8(&offset);  // normalize endianness

        // Each stored offset must point inside the buffer.
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }

        offset_table[c] = offset + 4;  // +4 to skip 'part number'
        marker += 8;
      }
    } else {
      // Multi-level tiled part: one table per mip/rip level.
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
        if (num_blocks != exr_headers[i]->chunk_count) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
      for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
        for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
            tinyexr::tinyexr_uint64 offset;
            memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
            tinyexr::swap8(&offset);  // normalize endianness
            if (offset >= size) {
              tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                       err);
              return TINYEXR_ERROR_INVALID_DATA;
            }
            offset_data.offsets[l][dy][dx] = offset + 4;  // +4 to skip 'part number'
            marker += sizeof(tinyexr::tinyexr_uint64);    // = 8
          }
        }
      }
    }
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];

    // First check 'part number' is identical to 'i' for every chunk.
    for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
      for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
        for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
          const unsigned char *part_number_addr =
              memory + offset_data.offsets[l][dy][dx] - 4;  // -4 to move to 'part number' field.
          unsigned int part_no;
          memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
          tinyexr::swap4(&part_no);

          if (part_no != i) {
            tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                     err);
            return TINYEXR_ERROR_INVALID_DATA;
          }
        }

    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
                                   memory, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}
// File-based wrapper over LoadEXRMultipartImageFromMemory: reads the whole
// file into a buffer and decodes all parts.
// Returns TINYEXR_SUCCESS, or an error code with a message in *err.
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  // Fix: also reject filename == NULL (the sibling *FromFile functions check
  // it; here it was passed straight into fopen/std::string).
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    // Fix: report short reads instead of discarding `ret` with (void)ret —
    // consistent with the other *FromFile loaders in this file.
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// Saves an interleaved float image (1, 3 or 4 components) as an EXR file.
// Channels are written in (A)BGR order, which most EXR viewers expect.
// save_as_fp16 > 0 stores half-precision pixels; otherwise full float.
// Returns TINYEXR_SUCCESS, or an error code with a message in *err.
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;

    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRHeader header;
  InitEXRHeader(&header);

  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(static_cast<size_t>(width * height));
    memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
  } else {
    images[0].resize(static_cast<size_t>(width * height));
    images[1].resize(static_cast<size_t>(width * height));
    images[2].resize(static_cast<size_t>(width * height));
    images[3].resize(static_cast<size_t>(width * height));

    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
    for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  // Channel planes, ordered (A)BGR for the writer.
  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image
    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }

  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);

  // Fix: release header allocations on every path. The original returned
  // before the free() calls when SaveEXRImageToFile failed, leaking
  // channels/pixel_types/requested_pixel_types.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
owl_ndarray_pool_impl.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2019 Liang Wang <liang.wang@cl.cam.ac.uk>
*/
#ifdef OWL_ENABLE_TEMPLATE
/*
 * 2D (spatial) pooling kernel, instantiated through template macros:
 *   TYPE     - element type
 *   INITACC  - initial accumulator value
 *   ACCFN    - folds one input element into the accumulator
 *   UPDATEFN - finalizes the accumulator given the element count
 *              (presumably identity for max-pooling and mean for average
 *               pooling -- confirm against the instantiating header).
 *
 * Layout (derived from the index arithmetic below):
 *   input  : [batches][input_cols][input_rows][in_channel]
 *   output : [batches][output_cols][output_rows][in_channel]
 *
 * padding == 1 keeps pr = pc = 0 (no padding); otherwise top/left padding
 * offsets are derived from the output/kernel/stride geometry.
 * vRow_in_stride and vCol_in_stride are accepted but unused here.
 */
CAMLprim value FUN_NATIVE (spatial) (
  value vInput_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);

  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  /* Unbox the OCaml integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);

  /* Element strides between consecutive batches / columns. */
  const int input_cri = input_cols * input_rows * in_channel;
  const int input_ri = input_rows * in_channel;
  const int output_cri = output_cols * output_rows * in_channel;
  const int output_ri = output_rows * in_channel;

  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));

  /* Derive top/left padding (clamped at zero) unless padding mode is 1. */
  int pr = 0, pc = 0;
  if (padding != 1){
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }

  /* Batches are independent, so parallelize over the outermost loop. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    const int output_idx_base_i = i * output_cri;
    for (int j = 0; j < output_cols; ++j) {
      const int output_idx_base_j = output_idx_base_i + j * output_ri;
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = output_idx_base_j + k * in_channel;
        /* Window of the input covered by output cell (j, k). */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < in_channel; ++l) {
          TYPE acc = INITACC;
          int c = 0;  /* number of in-bounds cells in the window */
          for (int a = cstart; a < cend; ++a) {
            for (int b = rstart; b < rend; ++b) {
              /* Skip window cells that fall into the padding region. */
              if (a >= 0 && a < input_cols &&
                  b >= 0 && b < input_rows) {
                int input_idx =
                  input_idx_base + a * input_ri + b * in_channel + l;
                TYPE t = *(input_ptr + input_idx);
                ACCFN (acc, t);
                c++;
              }
            }
          }
          int output_idx = output_idx_base + l;
          *(output_ptr + output_idx) = UPDATEFN (acc, c);
        }
      }
    }
  }

  return Val_unit;
}
/* Bytecode stub: unpack the 15-element argv array into the native stub. */
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
  (void) argn;  /* arity fixed by the native stub; silence unused warning */
  return FUN_NATIVE (spatial) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
  );
}
/*
 * Backward (gradient) pass of 2D pooling.
 * Under OWL_NDARRAY_MAX the output gradient flows only to the window's
 * maximal input cell; under OWL_NDARRAY_AVG it is distributed over all
 * in-bounds window cells via UPDATEFN.
 * Layout matches the forward kernel: [batch][col][row][channel].
 * Fix vs. original: the AVG scatter loop reused `i` as its loop variable,
 * shadowing the OpenMP batch index `i`; renamed to `q`.
 */
CAMLprim value FUN_NATIVE (spatial_backward) (
  value vInput, value vOutput_back, value vInput_back,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows,
  value vRow_stride, value vCol_stride,
  value vPad_rows, value vPad_cols
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
  struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);

  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *output_backward_ptr = (TYPE *) OUB->data;
  TYPE *input_backward_ptr = (TYPE *) INB->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int pad_rows = Long_val(vPad_rows);
  int pad_cols = Long_val(vPad_cols);

  const int ksize = kernel_cols * kernel_rows;
  const int output_cri = output_cols * output_rows * in_channel;
  const int output_ri = output_rows * in_channel;
  const int input_cri = input_cols * input_rows * in_channel;
  const int input_ri = input_rows * in_channel;

  if (pad_cols < 0) pad_cols = 0;
  if (pad_rows < 0) pad_rows = 0;

  memset(input_backward_ptr, 0,
         batches * input_cols * input_rows * in_channel * sizeof(TYPE));

  /* Batches write disjoint regions of input_backward_ptr, so the outer
     loop parallelizes safely. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    const int output_idx_base_i = i * output_cri;
    for (int j = 0; j < output_cols; ++j) {
      const int output_idx_base_j = output_idx_base_i + j * output_ri;
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = output_idx_base_j + k * in_channel;
        const int cstart = j * col_stride - pad_cols;
        const int rstart = k * row_stride - pad_rows;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < in_channel; ++l) {
          TYPE m;
          int output_idx = output_idx_base + l;
          m = *(output_backward_ptr + output_idx);
          /* Per-window scratch of contributing input indices (VLA of at
             most kernel_cols * kernel_rows ints). */
          int idx[ksize];
          memset(idx, 0, ksize * sizeof(int));
          TYPE acc = INITACC;
          int max_idx = 0;
          int c = 0;
          for (int a = cstart; a < cend; ++a) {
            for (int b = rstart; b < rend; ++b) {
              if (a >= 0 && a < input_cols &&
                  b >= 0 && b < input_rows) {
                int input_idx =
                  input_idx_base + a * input_ri + b * in_channel + l;
                idx[c++] = input_idx;
                #ifdef OWL_NDARRAY_MAX
                /* Track the arg-max cell; it alone receives the gradient. */
                TYPE t = *(input_ptr + input_idx);
                if (PLT(acc,t)){
                  acc = t;
                  max_idx = input_idx;
                }
                #endif
              }
            }
          }
          #ifdef OWL_NDARRAY_AVG
          /* Distribute the gradient over all c contributing cells.
             (Loop variable renamed from `i`, which shadowed the batch
             index above.) */
          for (int q = 0; q < c; q++) {
            *(input_backward_ptr + idx[q]) += UPDATEFN (m, c);
          }
          #else
          *(input_backward_ptr + max_idx) += UPDATEFN (m, c);
          #endif
        }
      }
    }
  }

  return Val_unit;
}
/* Bytecode stub: unpack the 15-element argv array into the native stub. */
CAMLprim value FUN_BYTE (spatial_backward) (value * argv, int argn) {
  (void) argn;  /* arity fixed by the native stub; silence unused warning */
  return FUN_NATIVE (spatial_backward) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
  );
}
/*
 * 3D (cuboid) pooling kernel; same macro parameterization as the spatial
 * kernel (TYPE / INITACC / ACCFN / UPDATEFN) with an extra depth dimension.
 *
 * Layout (derived from the index arithmetic below):
 *   input  : [batches][input_cols][input_rows][input_dpts][in_channel]
 *   output : [batches][output_cols][output_rows][output_dpts][in_channel]
 *
 * padding == 1 keeps pc = pr = pd = 0; otherwise padding offsets are
 * derived from the output/kernel/stride geometry, clamped at zero.
 */
CAMLprim value FUN_NATIVE (cuboid) (
  value vInput, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows, value vOutput_dpts,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);

  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  /* Unbox the OCaml integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);

  /* Element strides between consecutive batches / cols / rows. */
  const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
  const int output_rdi = output_rows * output_dpts * in_channel;
  const int output_di = output_dpts * in_channel;
  const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
  const int input_rdi = input_rows * input_dpts * in_channel;
  const int input_di = input_dpts * in_channel;

  memset(output_ptr, 0, batches * output_crdi * sizeof(TYPE));

  /* Derive padding offsets (clamped at zero) unless padding mode is 1. */
  int pd, pr, pc;
  if (padding == 1) {
    pc = 0; pr = 0; pd = 0;
  } else {
    int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
    int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
    int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
    pc = pad_cols / 2; if (pc < 0) pc = 0;
    pr = pad_rows / 2; if (pr < 0) pr = 0;
    pd = pad_dpts / 2; if (pd < 0) pd = 0;
  }

  /* Batches are independent; parallelize the outermost loop. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    const int output_idx_base_i = i * output_crdi;
    for (int j = 0; j < output_cols; ++j) {
      const int output_idx_base_j = output_idx_base_i + j * output_rdi;
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base_k = output_idx_base_j + k * output_di;
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base = output_idx_base_k + d * in_channel;
          /* Input window covered by output cell (j, k, d). */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < in_channel; ++l) {
            TYPE acc = INITACC;
            int counter = 0;  /* number of in-bounds cells in the window */
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                for (int c = dstart; c < dend; ++c){
                  /* Skip window cells that fall into the padding region. */
                  if (a >= 0 && a < input_cols &&
                      b >= 0 && b < input_rows &&
                      c >= 0 && c < input_dpts) {
                    int input_idx =
                      input_idx_base + a * input_rdi + b * input_di +
                      c * in_channel + l;
                    TYPE t = *(input_ptr + input_idx);
                    ACCFN (acc, t);
                    counter++;
                  }
                }
              }
            }
            int output_idx = output_idx_base + l;
            *(output_ptr + output_idx) = UPDATEFN (acc, counter);
          }
        }
      }
    }
  }

  return Val_unit;
}
/* Bytecode stub: unpack the 17-element argv array into the native stub. */
CAMLprim value FUN_BYTE (cuboid) (value * argv, int argn) {
  (void) argn;  /* arity fixed by the native stub; silence unused warning */
  return FUN_NATIVE (cuboid) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16]
  );
}
/*
 * Backward (gradient) pass of 3D (cuboid) pooling.
 * Under OWL_NDARRAY_MAX the gradient flows only to the window's maximal
 * input cell; under OWL_NDARRAY_AVG it is distributed over all in-bounds
 * window cells via UPDATEFN.
 * Layout matches the forward kernel: [batch][col][row][depth][channel].
 * Fix vs. original: the AVG scatter loop reused `i` as its loop variable,
 * shadowing the OpenMP batch index `i`; renamed to `q`.
 */
CAMLprim value FUN_NATIVE (cuboid_backward) (
  value vInput, value vOutput_back, value vInput_back,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows, value vOutput_dpts,
  value vCol_stride, value vRow_stride, value vDpt_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
  struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);

  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *output_backward_ptr = (TYPE *) OUB->data;
  TYPE *input_backward_ptr = (TYPE *) INB->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int col_stride = Long_val(vCol_stride);
  int row_stride = Long_val(vRow_stride);
  int dpt_stride = Long_val(vDpt_stride);
  int padding = Long_val(vPadding);

  const int ksize = kernel_cols * kernel_rows * kernel_dpts;
  const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
  const int output_rdi = output_rows * output_dpts * in_channel;
  const int output_di = output_dpts * in_channel;
  const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
  const int input_rdi = input_rows * input_dpts * in_channel;
  const int input_di = input_dpts * in_channel;

  /* Derive padding offsets (clamped at zero) unless padding mode is 1. */
  int pd, pr, pc;
  if (padding == 1) {
    pc = 0; pr = 0; pd = 0;
  } else {
    int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
    int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
    int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
    pc = pad_cols / 2; if (pc < 0) pc = 0;
    pr = pad_rows / 2; if (pr < 0) pr = 0;
    pd = pad_dpts / 2; if (pd < 0) pd = 0;
  }

  memset(input_backward_ptr, 0, batches * input_crdi * sizeof(TYPE));

  /* Batches write disjoint regions of input_backward_ptr, so the outer
     loop parallelizes safely. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    const int output_idx_base_i = i * output_crdi;
    for (int j = 0; j < output_cols; ++j) {
      const int output_idx_base_j = output_idx_base_i + j * output_rdi;
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base_k = output_idx_base_j + k * output_di;
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base = output_idx_base_k + d * in_channel;
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < in_channel; ++l) {
            TYPE m;
            int output_idx = output_idx_base + l;
            m = *(output_backward_ptr + output_idx);
            /* Per-window scratch of contributing input indices (VLA). */
            int idx[ksize];
            memset(idx, 0, ksize * sizeof(int));
            TYPE acc = INITACC;
            int max_idx = 0;
            int counter = 0;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                for (int c = dstart; c < dend; ++c) {
                  if (a >= 0 && a < input_cols &&
                      b >= 0 && b < input_rows &&
                      c >= 0 && c < input_dpts) {
                    int input_idx =
                      input_idx_base + a * input_rdi + b * input_di +
                      c * in_channel + l;
                    idx[counter++] = input_idx;
                    #ifdef OWL_NDARRAY_MAX
                    /* Track the arg-max cell; it alone gets the gradient. */
                    TYPE t = *(input_ptr + input_idx);
                    if (PLT(acc,t)){
                      acc = t;
                      max_idx = input_idx;
                    }
                    #endif
                  }
                }
              }
            }
            #ifdef OWL_NDARRAY_AVG
            /* Distribute the gradient over all contributing cells.
               (Loop variable renamed from `i`, which shadowed the batch
               index above.) */
            for (int q = 0; q < counter; q++) {
              *(input_backward_ptr + idx[q]) += UPDATEFN (m, counter);
            }
            #else
            *(input_backward_ptr + max_idx) += UPDATEFN (m, counter);
            #endif
          }
        }
      }
    }
  }

  return Val_unit;
}
/* Bytecode stub: unpack the 18-element argv array into the native stub. */
CAMLprim value FUN_BYTE (cuboid_backward) (value * argv, int argn) {
  (void) argn;  /* arity fixed by the native stub; silence unused warning */
  return FUN_NATIVE (cuboid_backward) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17]
  );
}
#ifdef OWL_NDARRAY_MAX
/*
 * 2D max-pooling that also records the flat input index of each window's
 * maximum into an int64 argmax array (compiled only under OWL_NDARRAY_MAX).
 * Layout matches the spatial kernel: [batch][col][row][channel].
 * A window with no in-bounds cell stores INITACC and argmax -1.
 * Fix vs. original: the negative-padding clamps assigned the double
 * literal `0.` to the int pad variables; use the int literal `0`.
 */
CAMLprim value FUN_NATIVE (spatial_arg) (
  value vInput_ptr, value vOutput_ptr, value vArgmax_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows,
  value vRow_stride, value vCol_stride,
  value vPad_rows, value vPad_cols
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  struct caml_ba_array *AG = Caml_ba_array_val(vArgmax_ptr);

  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int64_t *argmax_ptr = (int64_t *) AG->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int pad_rows = Long_val(vPad_rows);
  int pad_cols = Long_val(vPad_cols);

  /* Clamp negative padding to zero (int literals, not `0.`). */
  if (pad_rows < 0) pad_rows = 0;
  if (pad_cols < 0) pad_cols = 0;

  const int input_cri = input_cols * input_rows * in_channel;
  const int input_ri = input_rows * in_channel;
  const int output_cri = output_cols * output_rows * in_channel;
  const int output_ri = output_rows * in_channel;

  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  memset(argmax_ptr, 0, batches * output_cri * sizeof(int64_t));

  /* Batches are independent; parallelize the outermost loop. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    const int output_idx_base_i = i * output_cri;
    for (int j = 0; j < output_cols; ++j) {
      const int output_idx_base_j = output_idx_base_i + j * output_ri;
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = output_idx_base_j + k * in_channel;
        const int cstart = j * col_stride - pad_cols;
        const int rstart = k * row_stride - pad_rows;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < in_channel; ++l) {
          TYPE acc = INITACC;
          int max_idx = -1;  /* -1 = no in-bounds cell beat INITACC */
          int c = 0;
          for (int a = cstart; a < cend; ++a) {
            for (int b = rstart; b < rend; ++b) {
              if (a >= 0 && a < input_cols &&
                  b >= 0 && b < input_rows) {
                int input_idx =
                  input_idx_base + a * input_ri + b * in_channel + l;
                TYPE t = *(input_ptr + input_idx);
                if (PLT(acc,t)){
                  acc = t;
                  max_idx = input_idx;
                }
                c++;
              }
            }
          }
          int output_idx = output_idx_base + l;
          *(output_ptr + output_idx) = acc;
          *(argmax_ptr + output_idx) = (int64_t) max_idx;
        }
      }
    }
  }

  return Val_unit;
}
/* Bytecode stub: unpack the 15-element argv array into the native stub. */
CAMLprim value FUN_BYTE (spatial_arg) (value * argv, int argn) {
  (void) argn;  /* arity fixed by the native stub; silence unused warning */
  return FUN_NATIVE (spatial_arg) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
  );
}
#endif /* OWL_NDARRAY_MAX */
#endif /* OWL_ENABLE_TEMPLATE */
|
spatial_methods.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Suneth Warnakulasuriya (https://github.com/sunethwarna)
//
#if !defined(KRATOS_SPATIAL_VARIANCE_METHOD_H_INCLUDED)
#define KRATOS_SPATIAL_VARIANCE_METHOD_H_INCLUDED
// System includes
#include <algorithm>
#include <cmath>
#include <functional>
#include <numeric>
#include <tuple>
#include <vector>
// External includes
// Project includes
#include "includes/communicator.h"
#include "includes/define.h"
#include "includes/model_part.h"
// Application includes
#include "custom_utilities/method_utilities.h"
namespace Kratos
{
///@addtogroup RANSApplication
///@{
///@name Kratos Globals
///@{
namespace SpatialMethods
{
template <class TContainerType, class TContainerItemType, template <class T> class TDataRetrievalFunctor>
class ContainerSpatialMethods
{
public:
// special overloaded method for flags
// Special overload for flags: counts how many local container items have
// the given flag set, then sums the counts over all MPI ranks so every
// rank returns the global count.
int static CalculateSum(const ModelPart& rModelPart, const Flags& rVariable)
{
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
const int number_of_items = static_cast<int>(r_container.size());
int flag_count = 0;
#pragma omp parallel for reduction(+ : flag_count)
for (int item_index = 0; item_index < number_of_items; ++item_index)
{
// Flags::Is yields a bool, contributing 1 when the flag is set.
const TContainerItemType& r_item = *(r_container.begin() + item_index);
flag_count += r_item.Is(rVariable);
}
// Cross-rank reduction via the data communicator.
return rModelPart.GetCommunicator().GetDataCommunicator().SumAll(flag_count);
}
// Sums rVariable over all items of the local container, then reduces the
// partial sums across MPI ranks.  Each OpenMP thread accumulates into a
// private `sum` which is merged into `global_sum` inside a critical
// section, so floating-point summation order is non-deterministic.
template <class TDataType>
TDataType static CalculateSum(const ModelPart& rModelPart, const Variable<TDataType>& rVariable)
{
KRATOS_TRY
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
TDataType global_sum = rVariable.Zero();
if (r_container.size() > 0)
{
// First item's value is used to size dynamically-sized data types
// (presumably Vector/Matrix) — see DataTypeSizeInitializer.
const TDataType& r_initial_value =
TDataRetrievalFunctor<TContainerItemType>()(*r_container.begin(), rVariable);
MethodUtilities::DataTypeSizeInitializer(global_sum, r_initial_value);
#pragma omp parallel
{
TDataType sum = rVariable.Zero();
MethodUtilities::DataTypeSizeInitializer(sum, r_initial_value);
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
// Guards against items carrying differently-sized values.
MethodUtilities::DataTypeSizeChecker(current_value, sum);
sum += current_value;
}
#pragma omp critical
{
global_sum += sum;
}
}
}
global_sum = rModelPart.GetCommunicator().GetDataCommunicator().SumAll(global_sum);
return global_sum;
KRATOS_CATCH("");
}
// Sums the chosen norm of rVariable over all local items, then reduces the
// scalar across MPI ranks.  The norm functor is resolved once from
// rNormType before the parallel region.
template <class TDataType>
double static CalculateNormSum(
const ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const std::string& rNormType,
Parameters Params)
{
KRATOS_TRY
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
double global_sum = 0.0;
const auto& norm_method =
MethodUtilities::GetNormMethod<TDataType>(rVariable, rNormType);
#pragma omp parallel
{
// Per-thread partial sum, merged atomically below.
double sum = 0.0;
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
sum += norm_method(current_value);
}
#pragma omp atomic
global_sum += sum;
}
global_sum = rModelPart.GetCommunicator().GetDataCommunicator().SumAll(global_sum);
return global_sum;
KRATOS_CATCH("");
}
// Computes the component-wise root mean square of rVariable over the whole
// (MPI-distributed) container: sqrt(sum(x^2) / N).  Division by zero is
// avoided via std::max(number_of_items, 1.0), so an empty container yields
// the zero value.
template <class TDataType>
TDataType static CalculateRootMeanSquare(const ModelPart& rModelPart, const Variable<TDataType>& rVariable)
{
KRATOS_TRY
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
TDataType global_sum = rVariable.Zero();
if (r_container.size() > 0)
{
// First item's value sizes dynamically-sized data types.
const TDataType& r_initial_value =
TDataRetrievalFunctor<TContainerItemType>()(*r_container.begin(), rVariable);
MethodUtilities::DataTypeSizeInitializer(global_sum, r_initial_value);
#pragma omp parallel
{
TDataType sum = rVariable.Zero();
MethodUtilities::DataTypeSizeInitializer(sum, r_initial_value);
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
MethodUtilities::DataTypeSizeChecker(current_value, sum);
// Accumulate squares; the square root is taken after reduction.
sum += MethodUtilities::RaiseToPower<TDataType>(current_value, 2);
}
#pragma omp critical
{
global_sum += sum;
}
}
}
global_sum = rModelPart.GetCommunicator().GetDataCommunicator().SumAll(global_sum);
const double number_of_items =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(
static_cast<double>(r_container.size()));
// RaiseToPower with exponent 0.5 applies the square root component-wise.
global_sum = MethodUtilities::RaiseToPower<TDataType>(
global_sum * (1.0 / std::max(number_of_items, 1.0)), 0.5);
return global_sum;
KRATOS_CATCH("");
}
// Computes the root mean square of the chosen norm of rVariable over the
// whole (MPI-distributed) container: sqrt(sum(norm(x)^2) / N).  Empty
// containers are handled by std::max(number_of_items, 1.0).
template <class TDataType>
double static CalculateNormRootMeanSquare(
const ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const std::string& rNormType,
Parameters Params)
{
KRATOS_TRY
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
double global_sum = 0.0;
const auto& norm_method =
MethodUtilities::GetNormMethod<TDataType>(rVariable, rNormType);
#pragma omp parallel
{
// Per-thread partial sum of squared norms.
double sum = 0.0;
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
sum += std::pow(norm_method(current_value), 2);
}
#pragma omp atomic
global_sum += sum;
}
global_sum = rModelPart.GetCommunicator().GetDataCommunicator().SumAll(global_sum);
const double number_of_items =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(
static_cast<double>(r_container.size()));
return std::sqrt(global_sum / std::max(number_of_items, 1.0));
KRATOS_CATCH("");
}
// Mean of rVariable over the whole (MPI-distributed) container:
// global sum divided by the global item count.  std::max(..., 1.0) makes
// an empty container return the zero value instead of dividing by zero.
template <class TDataType>
TDataType static CalculateMean(const ModelPart& rModelPart, const Variable<TDataType>& rVariable)
{
const TDataType& sum = CalculateSum<TDataType>(rModelPart, rVariable);
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
const double number_of_items =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(
static_cast<double>(r_container.size()));
return sum * (1.0 / std::max(number_of_items, 1.0));
}
// Mean of the chosen norm of rVariable over the whole (MPI-distributed)
// container.  Returns 0.0 when the global container is empty.
template <class TDataType>
double static CalculateNormMean(
const ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const std::string& rNormType,
Parameters Params)
{
const double sum =
CalculateNormSum<TDataType>(rModelPart, rVariable, rNormType, Params);
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
const double number_of_items =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(
static_cast<double>(r_container.size()));
if (number_of_items > 0)
{
return sum * (1.0 / number_of_items);
}
return 0.0;
}
// Computes (mean, variance) of rVariable over the whole (MPI-distributed)
// container using the E[x^2] - E[x]^2 identity.  Returns both as a tuple;
// both are component-wise for vector/matrix data types.  NOTE(review):
// the E[x^2]-E[x]^2 form can lose precision for large means — presumably
// acceptable for the statistics use case; confirm if accuracy matters.
template <class TDataType>
std::tuple<TDataType, TDataType> static CalculateVariance(
const ModelPart& rModelPart, const Variable<TDataType>& rVariable)
{
TDataType mean = CalculateMean<TDataType>(rModelPart, rVariable);
TDataType global_variance = rVariable.Zero();
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
if (r_container.size() > 0)
{
// First item's value sizes dynamically-sized data types.
const TDataType& r_initial_value =
TDataRetrievalFunctor<TContainerItemType>()(*r_container.begin(), rVariable);
MethodUtilities::DataTypeSizeInitializer(global_variance, r_initial_value);
#pragma omp parallel
{
TDataType variance = rVariable.Zero();
MethodUtilities::DataTypeSizeInitializer(variance, r_initial_value);
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
MethodUtilities::DataTypeSizeChecker(current_value, variance);
// Accumulate E[x^2] numerator.
variance += MethodUtilities::RaiseToPower(current_value, 2);
}
#pragma omp critical
{
global_variance += variance;
}
}
}
global_variance =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(global_variance);
const double number_of_items =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(
static_cast<double>(r_container.size()));
if (number_of_items > 0)
{
// variance = E[x^2] - mean^2 (population variance).
global_variance *= (1.0 / number_of_items);
global_variance -= MethodUtilities::RaiseToPower(mean, 2);
}
return std::make_tuple<TDataType, TDataType>(
std::forward<TDataType>(mean), std::forward<TDataType>(global_variance));
}
// Computes (mean, variance) of the chosen norm of rVariable over the whole
// (MPI-distributed) container using the E[x^2] - E[x]^2 identity.
template <class TDataType>
std::tuple<double, double> static CalculateNormVariance(
const ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const std::string& rNormType,
Parameters Params)
{
double mean = CalculateNormMean<TDataType>(rModelPart, rVariable, rNormType, Params);
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
double global_variance = 0.0;
const auto& norm_method =
MethodUtilities::GetNormMethod<TDataType>(rVariable, rNormType);
#pragma omp parallel
{
// Per-thread partial sum of squared norms.
double variance = 0.0;
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
variance += std::pow(norm_method(current_value), 2);
}
#pragma omp atomic
global_variance += variance;
}
global_variance =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(global_variance);
const double number_of_items =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(
static_cast<double>(r_container.size()));
if (number_of_items > 0)
{
// variance = E[x^2] - mean^2 (population variance).
global_variance *= (1.0 / number_of_items);
global_variance -= MethodUtilities::RaiseToPower(mean, 2);
}
return std::make_tuple<double, double>(
std::forward<double>(mean), std::forward<double>(global_variance));
}
// Finds the maximum norm value over the whole (MPI-distributed) container
// and the Id of the item that attains it.  Thread-local maxima are merged
// in a critical section; rank-local results are then merged by gathering
// every rank's (max, id) pair and scanning on each rank.  NOTE(review):
// when the container is globally empty the returned value is
// numeric_limits::lowest() with id 0.
template <class TDataType>
std::tuple<double, std::size_t> static GetNormMax(
const ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const std::string& rNormType,
Parameters Params)
{
KRATOS_TRY
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
double global_max = std::numeric_limits<double>::lowest();
unsigned int global_id = 0;
const auto& norm_method =
MethodUtilities::GetNormMethod<TDataType>(rVariable, rNormType);
#pragma omp parallel
{
double current_max = std::numeric_limits<double>::lowest();
unsigned int current_id = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
const double value_norm = norm_method(current_value);
if (value_norm > current_max)
{
current_max = value_norm;
current_id = r_item.Id();
}
}
#pragma omp critical
{
if (current_max > global_max)
{
global_max = current_max;
global_id = current_id;
}
}
}
// MPI merge: gather every rank's local (max, id) pair and take the best.
const DataCommunicator& r_data_communicator =
rModelPart.GetCommunicator().GetDataCommunicator();
const auto& global_max_value_array =
r_data_communicator.AllGather(std::vector<double>{global_max});
const auto& global_max_id_array =
r_data_communicator.AllGather(std::vector<unsigned int>{global_id});
for (std::size_t i = 0; i < global_max_value_array.size(); ++i)
{
if (global_max_value_array[i] > global_max)
{
global_max = global_max_value_array[i];
global_id = global_max_id_array[i];
}
}
return std::make_tuple<double, unsigned int>(
std::forward<double>(global_max), std::forward<unsigned int>(global_id));
KRATOS_CATCH("");
}
// Mirror of GetNormMax: finds the minimum norm value over the whole
// (MPI-distributed) container and the Id of the item attaining it.
// NOTE(review): an empty global container returns numeric_limits::max()
// with id 0.
template <class TDataType>
std::tuple<double, std::size_t> static GetNormMin(
const ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const std::string& rNormType,
Parameters Params)
{
KRATOS_TRY
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
double global_min = std::numeric_limits<double>::max();
unsigned int global_id = 0;
const auto& norm_method =
MethodUtilities::GetNormMethod<TDataType>(rVariable, rNormType);
#pragma omp parallel
{
double current_min = std::numeric_limits<double>::max();
unsigned int current_id = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
const double value_norm = norm_method(current_value);
if (value_norm < current_min)
{
current_min = value_norm;
current_id = r_item.Id();
}
}
#pragma omp critical
{
if (current_min < global_min)
{
global_min = current_min;
global_id = current_id;
}
}
}
// MPI merge: gather every rank's local (min, id) pair and take the best.
const DataCommunicator& r_data_communicator =
rModelPart.GetCommunicator().GetDataCommunicator();
const auto& global_min_value_array =
r_data_communicator.AllGather(std::vector<double>{global_min});
const auto& global_min_id_array =
r_data_communicator.AllGather(std::vector<unsigned int>{global_id});
for (std::size_t i = 0; i < global_min_value_array.size(); ++i)
{
if (global_min_value_array[i] < global_min)
{
global_min = global_min_value_array[i];
global_id = global_min_id_array[i];
}
}
return std::make_tuple<double, unsigned int>(
std::forward<double>(global_min), std::forward<unsigned int>(global_id));
KRATOS_CATCH("");
}
// Computes the median of the chosen norm of rVariable over the whole
// (MPI-distributed) container.  Each rank sorts its local norms, rank 0
// gathers and merges the per-rank sorted lists, picks the median, and the
// result is broadcast so every rank returns the same value.
template <class TDataType>
double static GetNormMedian(
const ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const std::string& rNormType,
Parameters Params)
{
KRATOS_TRY
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
const auto& norm_method =
MethodUtilities::GetNormMethod<TDataType>(rVariable, rNormType);
std::vector<double> local_values;
local_values.resize(r_container.size());
local_values.shrink_to_fit();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
local_values[i] = norm_method(current_value);
}
std::sort(local_values.begin(), local_values.end());
const DataCommunicator& r_data_communicator =
rModelPart.GetCommunicator().GetDataCommunicator();
// Gatherv returns one sorted vector per rank, all collected on rank 0.
const std::vector<std::vector<double>>& global_values =
r_data_communicator.Gatherv(local_values, 0);
double median = 0.0;
if (r_data_communicator.Rank() == 0)
{
// Merges the already-sorted per-rank lists into one sorted list.
const std::vector<double>& sorted_values_list =
MethodUtilities::SortSortedValuesList(global_values);
const int number_of_values = sorted_values_list.size();
if (number_of_values > 0)
{
if (number_of_values % 2 != 0)
{
// Odd count: middle element.
median = sorted_values_list[number_of_values / 2];
}
else
{
// Even count: average of the two middle elements.
median = (sorted_values_list[(number_of_values - 1) / 2] +
sorted_values_list[number_of_values / 2]) *
0.5;
}
}
}
r_data_communicator.Broadcast(median, 0);
return median;
KRATOS_CATCH("");
}
// Builds a histogram-style distribution of the chosen norm of rVariable
// over the whole (MPI-distributed) container.
//
// Params supports:
//   "number_of_value_groups" (int)            - number of histogram bins
//   "min_value" (double or the string "min")  - lower bound, or computed
//   "max_value" (double or the string "max")  - upper bound, or computed
//
// Returns a tuple of:
//   min value, max value, group upper limits, per-group counts,
//   per-group count fractions, per-group means, per-group variances.
// An extra trailing group catches values above max_value; the last regular
// limit is widened by 1e-16 so values equal to max_value land inside it.
//
// Fix over previous revision: the per-item group-search loop redeclared
// `i`, shadowing the enclosing OpenMP loop index (a -Wshadow hazard that
// invites accidental misuse); the inner index is now `group_index`.
template <class TDataType>
std::tuple<double, double, std::vector<double>, std::vector<int>, std::vector<double>, std::vector<double>, std::vector<double>> static GetNormDistribution(
const ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const std::string& rNormType,
Parameters Params)
{
KRATOS_TRY
Parameters default_parameters = Parameters(R"(
{
"number_of_value_groups" : 10,
"min_value" : "min",
"max_value" : "max"
})");
// If the caller supplied numeric bounds, make the defaults numeric too so
// RecursivelyValidateAndAssignDefaults does not reject the type mismatch.
if (Params.Has("min_value") && Params["min_value"].IsDouble())
{
default_parameters["min_value"].SetDouble(0.0);
}
if (Params.Has("max_value") && Params["max_value"].IsDouble())
{
default_parameters["max_value"].SetDouble(0.0);
}
Params.RecursivelyValidateAndAssignDefaults(default_parameters);
double min_value{0.0};
if (Params["min_value"].IsDouble())
{
min_value = Params["min_value"].GetDouble();
}
else if (
Params["min_value"].IsString() &&
Params["min_value"].GetString() == "min")
{
const auto& min_data =
GetNormMin<TDataType>(rModelPart, rVariable, rNormType, Params);
min_value = std::get<0>(min_data);
}
else
{
KRATOS_ERROR << "Unknown min_value. Allowed only double or \"min\" "
"string as a value. [ min_value = "
<< Params["min_value"] << " ]\n.";
}
double max_value{0.0};
if (Params["max_value"].IsDouble())
{
max_value = Params["max_value"].GetDouble();
}
else if (
Params["max_value"].IsString() &&
Params["max_value"].GetString() == "max")
{
const auto& max_data =
GetNormMax<TDataType>(rModelPart, rVariable, rNormType, Params);
max_value = std::get<0>(max_data);
}
else
{
KRATOS_ERROR << "Unknown max_value. Allowed only double or \"max\" "
"string as a value. [ max_value = "
<< Params["max_value"] << " ]\n.";
}
const int number_of_groups = Params["number_of_value_groups"].GetInt();
const TContainerType& r_container =
MethodUtilities::GetLocalDataContainer<TContainerType>(rModelPart);
const auto& norm_method =
MethodUtilities::GetNormMethod<TDataType>(rVariable, rNormType);
// Equispaced group upper limits between min_value and max_value.
std::vector<double> group_limits;
for (int i = 0; i < number_of_groups + 1; ++i)
{
group_limits.push_back(
min_value + (max_value - min_value) * static_cast<double>(i) /
static_cast<double>(number_of_groups));
}
// final group limit is extended by a small amount. epsilon in numeric
// limits cannot be used since testing also need to have the same
// extending value in python. Therefore hard coded value is used
group_limits[group_limits.size() - 1] += 1e-16;
// Overflow bin for values above max_value.
group_limits.push_back(std::numeric_limits<double>::max());
group_limits.shrink_to_fit();
const int number_of_limits = group_limits.size();
std::vector<int> distribution;
std::vector<double> group_means, group_variances;
for (int i = 0; i < number_of_limits; ++i)
{
distribution.push_back(0);
group_means.push_back(0.0);
group_variances.push_back(0.0);
}
distribution.shrink_to_fit();
group_means.shrink_to_fit();
group_variances.shrink_to_fit();
#pragma omp parallel
{
// Per-thread histograms, merged into the shared ones in the critical
// section below.
std::vector<int> local_distribution;
std::vector<double> local_means, local_variances;
for (int i = 0; i < number_of_limits; ++i)
{
local_distribution.push_back(0);
local_means.push_back(0.0);
local_variances.push_back(0.0);
}
local_distribution.shrink_to_fit();
local_means.shrink_to_fit();
local_variances.shrink_to_fit();
#pragma omp for
for (int i = 0; i < static_cast<int>(r_container.size()); ++i)
{
const TContainerItemType& r_item = *(r_container.begin() + i);
const TDataType& current_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rVariable);
const double value_norm = norm_method(current_value);
// Linear scan for the first limit exceeding the value; limits are
// ascending so this places the value in its group.
for (int group_index = 0; group_index < number_of_limits; ++group_index)
{
if (value_norm < group_limits[group_index])
{
++local_distribution[group_index];
local_means[group_index] += value_norm;
local_variances[group_index] += std::pow(value_norm, 2);
break;
}
}
}
#pragma omp critical
{
for (int i = 0; i < number_of_limits; ++i)
{
distribution[i] += local_distribution[i];
group_means[i] += local_means[i];
group_variances[i] += local_variances[i];
}
}
}
// MPI reduction of counts and per-group sums.
std::vector<int> global_distribution =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(distribution);
std::vector<double> global_mean_distribution =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(group_means);
std::vector<double> global_variance_distribution =
rModelPart.GetCommunicator().GetDataCommunicator().SumAll(group_variances);
const double number_of_items = static_cast<double>(std::max(
std::accumulate(global_distribution.begin(), global_distribution.end(), 0), 1));
std::vector<double> global_percentage_distributions;
for (int i = 0; i < number_of_limits; ++i)
{
const double number_of_values_in_group =
static_cast<double>(global_distribution[i]);
global_percentage_distributions.push_back(number_of_values_in_group / number_of_items);
if (number_of_values_in_group > 0.0)
{
// Per-group mean and population variance (E[x^2] - E[x]^2).
global_mean_distribution[i] /= number_of_values_in_group;
global_variance_distribution[i] /= number_of_values_in_group;
global_variance_distribution[i] -=
std::pow(global_mean_distribution[i], 2);
}
}
// reversing group limit is extention
group_limits[group_limits.size() - 2] -= 1e-16;
group_limits[group_limits.size() - 1] = max_value;
return std::make_tuple<
double, double, std::vector<double>, std::vector<int>,
std::vector<double>, std::vector<double>, std::vector<double>>(
std::forward<double>(min_value), std::forward<double>(max_value),
std::forward<std::vector<double>>(group_limits),
std::forward<std::vector<int>>(global_distribution),
std::forward<std::vector<double>>(global_percentage_distributions),
std::forward<std::vector<double>>(global_mean_distribution),
std::forward<std::vector<double>>(global_variance_distribution));
KRATOS_CATCH("");
}
};
// Convenience aliases for the Kratos container/item types used below.
using NodeType = ModelPart::NodeType;
using ElementType = ModelPart::ElementType;
using ConditionType = ModelPart::ConditionType;
using NodesContainerType = ModelPart::NodesContainerType;
using ElementsContainerType = ModelPart::ElementsContainerType;
using ConditionsContainerType = ModelPart::ConditionsContainerType;
// Concrete instantiations of ContainerSpatialMethods, pairing each
// container kind with the matching data-retrieval functor:
// nodal historical (solution-step) data.
class HistoricalSpatialMethods
    : public SpatialMethods::ContainerSpatialMethods<NodesContainerType, NodeType, MethodUtilities::HistoricalDataValueRetrievalFunctor>
{
};
// Nodal non-historical (data value container) data.
class NodalNonHistoricalSpatialMethods
    : public SpatialMethods::ContainerSpatialMethods<NodesContainerType, NodeType, MethodUtilities::NonHistoricalDataValueRetrievalFunctor>
{
};
// Condition non-historical data.
class ConditionNonHistoricalSpatialMethods
    : public SpatialMethods::ContainerSpatialMethods<ConditionsContainerType, ConditionType, MethodUtilities::NonHistoricalDataValueRetrievalFunctor>
{
};
// Element non-historical data.
class ElementNonHistoricalSpatialMethods
    : public SpatialMethods::ContainerSpatialMethods<ElementsContainerType, ElementType, MethodUtilities::NonHistoricalDataValueRetrievalFunctor>
{
};
} // namespace SpatialMethods
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_SPATIAL_VARIANCE_METHOD_H_INCLUDED defined
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CombineImages() reads successive images from the list and assigns their
  grayscale intensity to the requested channels (Red, Green, Blue, Opacity,
  and — for CMYK — Index) of a single combined image.

  Fix over previous revision: each per-channel section acquired a virtual
  cache view and, when GetCacheViewVirtualPixels() failed, jumped to the
  next row with `continue` without ever calling DestroyCacheView(), leaking
  the view and silently ignoring the error.  The failure paths now destroy
  the view and record the failure in `status` so the caller sees the error.
*/
MagickExport Image *CombineImages(const Image *image,const ChannelType channel,
ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"
CacheView
*combine_view;
const Image
*next;
Image
*combine_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Ensure the image are the same size.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
{
if ((next->columns != image->columns) || (next->rows != image->rows))
ThrowImageException(OptionError,"ImagesAreNotTheSameSize");
}
combine_image=CloneImage(image,0,0,MagickTrue,exception);
if (combine_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse)
{
InheritException(exception,&combine_image->exception);
combine_image=DestroyImage(combine_image);
return((Image *) NULL);
}
if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(combine_image,sRGBColorspace);
if ((channel & OpacityChannel) != 0)
combine_image->matte=MagickTrue;
(void) SetImageBackgroundColor(combine_image);
/*
Combine images.
*/
status=MagickTrue;
progress=0;
combine_view=AcquireAuthenticCacheView(combine_image,exception);
for (y=0; y < (ssize_t) combine_image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
PixelPacket
*pixels;
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
1,exception);
if (pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
/*
Walk the image list: each requested channel consumes one image.
*/
next=image;
if (((channel & RedChannel) != 0) && (next != (Image *) NULL))
{
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
status=MagickFalse;
continue;
}
q=pixels;
for (x=0; x < (ssize_t) combine_image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p)));
p++;
q++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (((channel & GreenChannel) != 0) && (next != (Image *) NULL))
{
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
status=MagickFalse;
continue;
}
q=pixels;
for (x=0; x < (ssize_t) combine_image->columns; x++)
{
SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p)));
p++;
q++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (((channel & BlueChannel) != 0) && (next != (Image *) NULL))
{
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
status=MagickFalse;
continue;
}
q=pixels;
for (x=0; x < (ssize_t) combine_image->columns; x++)
{
SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p)));
p++;
q++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL))
{
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
status=MagickFalse;
continue;
}
q=pixels;
for (x=0; x < (ssize_t) combine_image->columns; x++)
{
SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p)));
p++;
q++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace) && (next != (Image *) NULL))
{
IndexPacket
*indexes;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(combine_view);
for (x=0; x < (ssize_t) combine_image->columns; x++)
{
SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p)));
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CombineImageTag,progress++,
combine_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
combine_view=DestroyCacheView(combine_view);
if (IsGrayColorspace(combine_image->colorspace) != MagickFalse)
(void) TransformImageColorspace(combine_image,sRGBColorspace);
if (status == MagickFalse)
combine_image=DestroyImage(combine_image);
return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/* Returns the image's matte flag: MagickTrue when the alpha channel is
   active (e.g. RGBA/CMYKA), MagickFalse otherwise.  Pure accessor: no
   pixel data is inspected. */
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
assert(image != (const Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
return(image->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImageChannel() separates a channel from the image and returns it as
% a grayscale image. A channel is a particular color component of each pixel
% in the image.
%
% The format of the SeparateImageChannel method is:
%
% MagickBooleanType SeparateImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
*/
/* Non-destructive wrapper around SeparateImageChannel(): clones the input
   image, extracts the requested channel in the clone, and returns it.
   Returns NULL (with `exception`/the clone's exception holding details)
   when cloning or separation fails; the caller owns the returned image. */
MagickExport Image *SeparateImage(const Image *image,const ChannelType channel,
ExceptionInfo *exception)
{
Image
*separate_image;
MagickBooleanType
status;
/*
Initialize separate image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
separate_image=CloneImage(image,0,0,MagickTrue,exception);
if (separate_image == (Image *) NULL)
return((Image *) NULL);
status=SeparateImageChannel(separate_image,channel);
if (status == MagickFalse)
/* Separation failed: free the clone so no partial result escapes. */
separate_image=DestroyImage(separate_image);
return(separate_image);
}
MagickExport MagickBooleanType SeparateImageChannel(Image *image,
  const ChannelType channel)
{
#define SeparateImageTag "Separate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Pixels are rewritten in place, so a DirectClass store is required. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* GrayChannels writes intensity into alpha, so transparency must be on. */
  if (channel == GrayChannels)
    image->matte=MagickTrue;
  /*
    Separate image channels: replicate the selected channel's value into
    the other color channels, row by row (parallel over rows).
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    switch (channel)
    {
      case RedChannel:
      {
        /* Copy red into green and blue, yielding a grayscale of red. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
        break;
      }
      case GreenChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelGreen(q));
          SetPixelBlue(q,GetPixelGreen(q));
          q++;
        }
        break;
      }
      case BlueChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelBlue(q));
          SetPixelGreen(q,GetPixelBlue(q));
          q++;
        }
        break;
      }
      case OpacityChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelOpacity(q));
          SetPixelGreen(q,GetPixelOpacity(q));
          SetPixelBlue(q,GetPixelOpacity(q));
          q++;
        }
        break;
      }
      case BlackChannel:
      {
        /*
          The black channel lives in the index queue; it is only present
          for colormapped or CMYK images, otherwise skip the row.
        */
        if ((image->storage_class != PseudoClass) &&
            (image->colorspace != CMYKColorspace))
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIndex(indexes+x));
          SetPixelGreen(q,GetPixelIndex(indexes+x));
          SetPixelBlue(q,GetPixelIndex(indexes+x));
          q++;
        }
        break;
      }
      case TrueAlphaChannel:
      {
        /* Render the alpha channel as a grayscale image. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelAlpha(q));
          SetPixelGreen(q,GetPixelAlpha(q));
          SetPixelBlue(q,GetPixelAlpha(q));
          q++;
        }
        break;
      }
      case GrayChannels:
      {
        /* Copy pixel intensity into the alpha channel (colors untouched). */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q)));
          q++;
        }
        break;
      }
      default:
        break;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across threads; serialize updates. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SeparateImageChannel)
#endif
        proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* All channels except GrayChannels produce an opaque grayscale result. */
  if (channel != GrayChannels)
    image->matte=MagickFalse;
  (void) SetImageColorspace(image,GRAYColorspace);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
%      Image *SeparateImages(const Image *image,
%        const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channels to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  /*
    Build a list with one grayscale image per requested channel.  BUG FIX:
    the result of each CloneImage() is now checked before use -- previously
    a failed clone (NULL) was passed straight into SeparateImageChannel(),
    which asserts on / dereferences its image argument.  On clone failure
    the corresponding channel is simply omitted from the list; any error
    detail is already recorded in `exception` by CloneImage().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  if ((channel & RedChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,RedChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & GreenChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,GreenChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & BlueChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlueChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  /* Black channel only exists for CMYK images. */
  if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlackChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & AlphaChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,TrueAlphaChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelType alpha_type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
%      AssociateAlphaChannel, CopyAlphaChannel, DisassociateAlphaChannel,
%      DeactivateAlphaChannel, ExtractAlphaChannel, OpaqueAlphaChannel,
% ResetAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, and
% TransparentAlphaChannel.
%
*/
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelType alpha_type)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  exception=(&image->exception);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /* Enable transparency without touching pixel data. */
      image->matte=MagickTrue;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: premultiply each color channel by the normalized
        alpha value, then drop the matte channel.
      */
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            alpha;

          alpha=QuantumScale*GetPixelAlpha(q);
          SetPixelRed(q,ClampToQuantum(alpha*GetPixelRed(q)));
          SetPixelGreen(q,ClampToQuantum(alpha*GetPixelGreen(q)));
          SetPixelBlue(q,ClampToQuantum(alpha*GetPixelBlue(q)));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->matte=MagickFalse;
      break;
    }
    case BackgroundAlphaChannel:
    {
      IndexPacket
        index;

      /* NOTE(review): this local `status` shadows the outer one; the case
         returns it directly below, so the function-level status and the
         final SyncImagePixelCache() call are bypassed for this path. */
      MagickBooleanType
        status;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      /*
        Set transparent pixels to background color.
      */
      if (image->matte == MagickFalse)
        break;
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Only fully transparent pixels are replaced. */
          if (q->opacity == TransparentOpacity)
            {
              SetPixelRed(q,pixel.red);
              SetPixelGreen(q,pixel.green);
              SetPixelBlue(q,pixel.blue);
            }
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    case ShapeAlphaChannel:
    {
      /*
        Special usage case for SeparateImageChannel(): copy grayscale color to
        the alpha channel.
      */
      status=SeparateImageChannel(image,GrayChannels);
      image->matte=MagickTrue; /* make sure transparency is now on! */
      if (alpha_type == ShapeAlphaChannel)
        {
          MagickPixelPacket
            background;

          /*
            Reset all color channels to background color.
          */
          GetMagickPixelPacket(image,&background);
          SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *)
            NULL,&background);
          (void) LevelColorsImage(image,&background,&background,MagickTrue);
        }
      break;
    }
    case DeactivateAlphaChannel:
    {
      /* Disable transparency; pixel alpha values are left in place. */
      image->matte=MagickFalse;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha: divide each color channel by the normalized
        alpha (inverse of AssociateAlphaChannel), then drop the matte
        channel.  PerceptibleReciprocal() guards against division by zero.
      */
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      image->matte=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            alpha;

          alpha=QuantumScale*GetPixelAlpha(q);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*GetPixelRed(q)));
          SetPixelGreen(q,ClampToQuantum(alpha*GetPixelGreen(q)));
          SetPixelBlue(q,ClampToQuantum(alpha*GetPixelBlue(q)));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->matte=MagickFalse;
      break;
    }
    case ExtractAlphaChannel:
    {
      /* Turn the alpha channel into a grayscale image and drop matte. */
      status=SeparateImageChannel(image,TrueAlphaChannel);
      image->matte=MagickFalse;
      break;
    }
    case RemoveAlphaChannel:
    case FlattenAlphaChannel:
    {
      IndexPacket
        index;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      /*
        Flatten image pixels over the background pixels.
      */
      if (image->matte == MagickFalse)
        break;
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            opacity;

          /* Standard "over" composite of the pixel on the background. */
          gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity;
          opacity=(double) QuantumRange*(1.0-gamma);
          gamma=PerceptibleReciprocal(gamma);
          q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red,
            (MagickRealType) q->opacity,(MagickRealType) pixel.red,
            (MagickRealType) pixel.opacity));
          q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green,
            (MagickRealType) q->opacity,(MagickRealType) pixel.green,
            (MagickRealType) pixel.opacity));
          q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue,
            (MagickRealType) q->opacity,(MagickRealType) pixel.blue,
            (MagickRealType) pixel.opacity));
          q->opacity=ClampToQuantum(opacity);
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      /* NOTE(review): returns here, bypassing SyncImagePixelCache() below. */
      return(status);
    }
    case ResetAlphaChannel: /* deprecated */
    case OpaqueAlphaChannel:
    {
      status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case SetAlphaChannel:
    {
      /* Only initialize opacity if the image has no matte channel yet. */
      if (image->matte == MagickFalse)
        status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageOpacity(image,TransparentOpacity);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  return(SyncImagePixelCache(image,&image->exception));
}
|
budgeted-train.c | /*
============================================================================
Author : Roberto Diaz Morales
============================================================================
Copyright (c) 2016 Roberto Díaz Morales
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
============================================================================
*/
/**
* @brief Implementation of the functions to train a budgeted SVM.
*
* See budgeted-train.h for a detailed description of its functions and parameters.
*
* For a detailed description of the algorithm and its parameters read the following paper:
*
* Díaz-Morales, R., & Navia-Vázquez, Á. (2016). Efficient parallel implementation of kernel methods. Neurocomputing, 191, 175-186.
*
* @file budgeted-train.c
* @author Roberto Diaz Morales
* @date 23 Aug 2016
* @see budgeted-train.h
*
*/
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include "budgeted-train.h"
#include "kernels.h"
#include "ParallelAlgorithms.h"
/**
* @cond
*/
extern void dgemm_(char *transa, char *transb, int *m, int *n, int *k, double
*alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c,
int *ldc );
extern void dpotrs_(char *uplo, int *n, int *nrhs, double *A, int *lda,
double *B, int *ldb, int *info);
/**
* @brief Random selection of centroids for the budgeted model
*
* It creates a random permutation and selects the first elements to be the indexes of the centroids of the budgeted model.
*
* @param dataset The training set.
* @param props The struct with the training parameters.
*/
int* randomCentroids(svm_dataset dataset,properties props){
    int i;

    /* Identity permutation over all training samples. */
    int *shuffle = malloc(dataset.l * sizeof(int));
    for(i = 0; i < dataset.l; ++i){
        shuffle[i] = i;
    }

    /* Fisher-Yates shuffle, walking from the back of the array. */
    for(i = dataset.l - 1; i >= 0; --i){
        int pick = rand() % (i + 1);      /* random position in [0, i] */
        int held = shuffle[i];
        shuffle[i] = shuffle[pick];
        shuffle[pick] = held;
    }

    /* The first props.size shuffled entries become the centroid indexes. */
    int *chosen = malloc(props.size * sizeof(int));
    for(i = 0; i < props.size; ++i){
        chosen[i] = shuffle[i];
    }
    free(shuffle);
    return chosen;
}
/**
* @brief Sparse Greedy Matrix Approximation algorithm
*
* Sparse Greedy Matrix Approximation algorithm to select the basis elements of the budgeted model. For a detailed description read:
*
* Díaz-Morales, R., & Navia-Vázquez, Á. (2016). Efficient parallel implementation of kernel methods. Neurocomputing, 191, 175-186.
*
* @param dataset The training set.
* @param props The struct with the training parameters.
*/
int* SGMA(svm_dataset dataset,properties props){

    // Error descent of each of the 64 candidate samples evaluated per
    // iteration, and the dataset index of each candidate.
    double *descE=(double *) malloc(64*sizeof(double));
    int *indexes=(int *) malloc(64*sizeof(int));
    int *centroids=(int *) malloc((props.size)*sizeof(int));

    // Per-candidate workspace (one slot per parallel candidate evaluation).
    double **KNC = (double **) malloc(64*sizeof(double *));
    double **KSM = (double **) malloc(64*sizeof(double *));
    double *eta=(double *) malloc(64*sizeof(double));
    double *KSC = (double *) malloc((dataset.l)*(props.size)*sizeof(double));
    double **Z = (double **) malloc(64*sizeof(double *));

    // Cholesky factor of the centroid kernel matrix, its inverse, and the
    // grown-by-one copies rebuilt on every iteration.
    double *iKC = (double *) calloc((props.size)*(props.size),sizeof(double));
    double *invKC = (double *) calloc((props.size)*(props.size),sizeof(double));
    double *iKCTmp = (double *) calloc((props.size)*(props.size),sizeof(double));
    double *invKCTmp = (double *) calloc((props.size)*(props.size),sizeof(double));
    double *L2 = (double *) calloc((props.size),sizeof(double));
    double *IL2 = (double *) calloc((props.size),sizeof(double));

    int size = 0;
    int i,e,bestBasis,ncols=1,info=1;
    double factor=-1;
    double factorA=1.0;
    char s = 'L';
    char trans = 'N';
    double *miKSM;
    double *miKNC;
    double *miZ;
    double value,L3,IL3;
    double *tmp1,*tmp2;
    int indexSample=0;

    for(i=0;i<64;i++){
        KNC[i]=(double *) malloc((props.size)*sizeof(double));
        KSM[i]=(double *) malloc((dataset.l)*sizeof(double));
        Z[i]=(double *) malloc((props.size)*sizeof(double));
    }

    // Greedily grow the basis until it reaches the requested budget.
    while(size<props.size){
        if(size>1){
            // BUG FIX: indexSample added to the private clause.  It was
            // shared, so concurrent threads could overwrite each other's
            // sampled index between the class check and indexes[i]=...,
            // occasionally storing a sample of the wrong class.
            // NOTE(review): rand() inside a parallel region is not
            // guaranteed thread-safe; left as-is to preserve behavior.
            #pragma omp parallel default(shared) private(i,e,miKSM,miKNC,miZ,value,indexSample)
            {
                #pragma omp for schedule(static)
                for(i=0;i<64;i++){
                    // Draw candidates alternating between the two classes
                    // (even i -> label -1, odd i -> label +1).
                    indexSample=rand()%(dataset.l);
                    while(dataset.y[indexSample] != ((i%2)*2.0-1)){
                        indexSample=rand()%(dataset.l);
                    }
                    indexes[i]=indexSample;
                    miKNC=KNC[i];
                    miKSM=KSM[i];
                    miZ=Z[i];
                    // Kernel of the candidate against every sample and
                    // against the current centroids.
                    for(e=0;e<dataset.l;e++) miKSM[e]=kernelFunction(dataset,indexes[i],e,props);
                    for(e=0;e<size;e++){
                        value=kernelFunction(dataset,indexes[i],centroids[e],props);
                        miKNC[e]=value;
                        miZ[e]=value;
                    }
                    value=1.0;
                    if(size==0){
                        eta[i]=value;
                    }else{
                        // Solve with the current Cholesky factor, then form
                        // eta = 1 - knc' * z and the residual KSM update.
                        dpotrs_(&s,&size,&ncols, iKC, &size, miZ,&size,&info);
                        for(e=0;e<size;e++) value = value - (miKNC[e]*miZ[e]);
                        eta[i]=value;
                        dgemm_(&trans, &trans, &(dataset.l), &ncols, &size,&factorA, KSC, &(dataset.l), miZ, &size, &factor, miKSM, &(dataset.l));
                    }
                    value=0.0;
                    for(e=0;e<dataset.l;e++) value +=miKSM[e]*miKSM[e];
                    if(eta[i]>0.0){
                        descE[i]=(1.0/eta[i])*value;
                    }else{
                        descE[i]=0.0;
                    }
                }
            }
            // Keep the candidate with the largest error descent.
            value=descE[0];
            bestBasis=0;
            for(i=1;i<64;i++){
                if(descE[i]>value){
                    value=descE[i];
                    bestBasis=i;
                }
            }
            centroids[size]=indexes[bestBasis];
        }else{
            // First two basis elements use pseudo-indexes dataset.l and
            // dataset.l+1 (presumably the class means, handled inside
            // kernelFunction -- confirm against kernels.h).
            if(size==0){
                centroids[size]=dataset.l;
            }else{
                centroids[size]=dataset.l+1;
                KNC[0][0]=kernelFunction(dataset,centroids[0],centroids[1],props);
            }
            value=1.0;
            bestBasis=0;
        }

        if(props.verbose==1){
            if(size==0) printf("Best Error Descent %f, Average of positive data is centroid %d\n",value,size);
            if(size==1) printf("Best Error Descent %f, Average of negative data is centroid %d\n",value,size);
            if(size>1) printf("Best Error Descent %f, Data with index %d is centroid %d\n",value,centroids[size],size);
        }

        // Kernel column between every training sample and the new centroid.
        #pragma omp parallel default(shared) private(i)
        {
            #pragma omp for schedule(static)
            for(i=0;i<dataset.l;i++) KSC[size*(dataset.l)+i]=kernelFunction(dataset,i,centroids[size],props);
        }

        // Rank-one growth of the Cholesky factor and its inverse (small
        // ridge term added to the diagonal for numerical stability).
        if(size==0){
            iKCTmp[0]=pow(kernelFunction(dataset,centroids[size],centroids[size],props)+0.000001,0.5);
            invKCTmp[0]=1.0/iKCTmp[0];
        }else{
            ParallelVectorMatrixT(KNC[bestBasis],size,invKC,L2,props.Threads);
            L3=kernelFunction(dataset,centroids[size],centroids[size],props)+0.00001;
            for(i=0;i<size;i++) L3 = L3 - (L2[i]*L2[i]);
            L3=pow(L3,0.5);
            IL3=1.0/L3;
            ParallelVectorMatrix(L2,size,invKC,IL2,props.Threads);
            for(i=0;i<size;i++){
                for(e=0;e<size;e++){
                    iKCTmp[(i*(size+1))+e]=iKC[(i*size)+e];
                    invKCTmp[(i*(size+1))+e]=invKC[(i*size)+e];
                }
            }
            iKCTmp[(size+1)*(size+1)-1]=L3;
            invKCTmp[(size+1)*(size+1)-1]=IL3;
            for(i=0;i<size;i++){
                iKCTmp[(i*(size+1))+size]=L2[i];
                invKCTmp[(i*(size+1))+size]=-IL3*IL2[i];
            }
        }

        // Swap current and updated factorizations (pointer exchange; all
        // four buffers remain live and are freed below).
        tmp1=&iKC[0];
        tmp2=&invKC[0];
        iKC=&iKCTmp[0];
        invKC=&invKCTmp[0];
        iKCTmp=&tmp1[0];
        invKCTmp=&tmp2[0];
        ++size;
    }

    for(i=0;i<64;i++){
        free(KNC[i]);
        free(KSM[i]);
        free(Z[i]);
    }
    free(KNC);
    free(KSM);
    free(eta);
    free(Z);
    free(KSC);
    free(iKC);
    free(invKC);
    free(iKCTmp);
    free(invKCTmp);
    free(L2);
    free(IL2);
    free(indexes);
    free(descE);
    // Caller owns the returned centroid index array.
    return centroids;
}
/**
* @brief Iterative Re-Weighted Least Squares Algorithm.
*
* IRWLS procedure to obtain the weights of the budgeted model. For a detailed description of the algorithm and parallelization:
*
* Díaz-Morales, R., & Navia-Vázquez, Á. (2016). Efficient parallel implementation of kernel methods. Neurocomputing, 191, 175-186.
*
* @param dataset The training set.
* @param indexes The indexes of the centroids selected by the SGMA algorithm.
* @param props The struct with the training parameters.
* @return The weights of every centroid.
*/
double* IRWLSpar(svm_dataset dataset, int* indexes,properties props){

    int i;
    // Kernel matrices: centroids x centroids (KC), samples x centroids
    // (KSC), and the weighted active copy (KSCA).
    double *KC=(double *) calloc(props.size*props.size,sizeof(double));
    double *KSC=(double *) calloc(dataset.l*props.size,sizeof(double));
    double *KSCA=(double *) calloc(dataset.l*props.size,sizeof(double));
    double *Da=(double *) calloc(dataset.l,sizeof(double));
    double *Day=(double *) calloc(dataset.l,sizeof(double));

    // Centroid kernel matrix with a small ridge (1e-5) on the diagonal.
    #pragma omp parallel for
    for (i=0;i<props.size;i++){
        int j=0;
        for (j=0;j<props.size;j++){
            KC[i*(props.size)+j]=kernelFunction(dataset,indexes[i], indexes[j], props);
            if(i==j) KC[i*(props.size)+j]+=pow(10,-5);
        }
    }

    double M=10000.0;

    // Sample-vs-centroid kernels plus initial IRWLS weights Da = M.
    // BUG FIX: kernelvalue was a function-scope variable written inside
    // this parallel loop (a data race); it is now local to each iteration.
    #pragma omp parallel for
    for (i=0;i<dataset.l;i++){
        Da[i]=M;
        Day[i]=dataset.y[i]*M;
        int j = 0;
        for (j=0;j<props.size;j++){
            double kernelvalue=kernelFunction(dataset,i, indexes[j], props);
            KSC[i*(props.size)+j]=kernelvalue;
            KSCA[i*(props.size)+j]=kernelvalue;
        }
    }

    // Stop conditions
    int iter=0, max_iter=500,cambios=100, trueSVs=0;
    double deltaW = 1e9, normW = 1.0;

    double *K1 = (double *) calloc(props.size*props.size,sizeof(double));
    double *K2 = (double *) calloc(props.size,sizeof(double));
    double *beta = (double *) calloc(props.size,sizeof(double));
    double *betaNew = (double *) calloc(props.size,sizeof(double));
    double *betaBest = (double *) calloc(props.size,sizeof(double));
    double *e = (double *) calloc(dataset.l,sizeof(double));
    int *indKSCA = (int *) calloc(dataset.l,sizeof(int));

    char notrans='N';
    char trans='T';
    int row = 1;
    double factor=1.0;
    double nfactor=-1.0;
    double zfactor=0.0;
    double oldnorm=0.0;
    int itersSinceBestDW=0;
    double bestDW=1e9;

    // Thread count for the parallel linear system: largest power of two
    // not exceeding props.Threads (and not exceeding props.size).
    int thLS=(int) pow(2,floor(log(props.Threads)/log(2.0)));
    if(props.size<thLS) thLS=pow(2,floor(log(props.size)/log(2.0)));
    if(thLS<1) thLS=1;

    int tamDgemm = props.Threads;
    if (props.size<props.Threads) tamDgemm = props.size;

    trueSVs=dataset.l;

    // Iterate until convergence (relative weight change below 1e-6),
    // the iteration cap, or five iterations without improvement.
    while( (iter<max_iter) && (deltaW/normW > 1e-6) && (itersSinceBestDW<5) ){

        // K1 = KC + KSCA'*KSCA, K2 = KSCA'*Day, built in column slices so
        // each thread runs its own dgemm on a disjoint output range.
        // BUG FIX: the corrupted token "¬rans" (mangled "&notrans") is
        // restored in the three dgemm_ calls below.
        memcpy(K1,KC,(props.size)*(props.size)*sizeof(double));
        if(trueSVs>0){
            #pragma omp parallel for
            for (i=0;i<tamDgemm;i++){
                int InitCol=round(i*props.size/tamDgemm);
                int FinalCol=round((i+1)*props.size/tamDgemm)-1;
                int lengthCol=FinalCol-InitCol+1;
                if(lengthCol>0){
                    dgemm_(&notrans, &notrans, &(lengthCol), &(row), &(trueSVs), &factor, &KSCA[InitCol], &(props.size), Day, &trueSVs, &zfactor, &K2[InitCol], &(props.size));
                    dgemm_(&notrans, &trans, &(lengthCol), &(props.size), &(trueSVs), &factor, &KSCA[InitCol], &(props.size), KSCA, &props.size, &factor, &K1[InitCol], &(props.size));
                }
            }
        }else{
            /* memset takes an int fill byte; was passed 0.0 (a double). */
            memset(K2,0,props.size*sizeof(double));
        }

        // Solve K1 * betaNew = K2 with the parallel solver.
        memset(betaNew,0,props.size*sizeof(double));
        omp_set_num_threads(thLS);
        ParallelLinearSystem(K1,props.size,props.size,0,0,K2,props.size,1,0,0,props.size,1,betaNew,props.size,1,0,0,thLS);
        omp_set_num_threads(props.Threads);

        // Convergence statistics ||deltaW||^2 and ||W||^2.
        deltaW=0.0;
        normW=0.0;
        for (i=0;i<props.size;i++){
            deltaW += pow(betaNew[i]-beta[i],2);
            normW += pow(betaNew[i],2);
            beta[i]=betaNew[i];
        }

        // Errors e = y - KSC*beta, computed in column slices.
        memcpy(e,dataset.y,dataset.l*sizeof(double));
        #pragma omp parallel for
        for (i=0;i<tamDgemm;i++){
            int InitCol=round(i*dataset.l/tamDgemm);
            int FinalCol=round((i+1)*dataset.l/tamDgemm)-1;
            int lengthCol=FinalCol-InitCol+1;
            if(lengthCol>0){
                dgemm_(&notrans, &notrans, &(row), &(lengthCol), &(props.size), &nfactor, beta, &row, &KSC[InitCol*props.size], &props.size, &factor, &e[InitCol], &(row));
            }
        }

        // IRWLS reweighting: Da = C/(y*e) clipped to [0, M]; correctly
        // classified-with-margin samples (y*e < 0) get weight zero.
        #pragma omp parallel for
        for(i=0;i<dataset.l;i++){
            if(e[i]*dataset.y[i]<0.0){
                Da[i]=0.0;
            }else{
                Da[i]=1.0*props.C/(dataset.y[i]*e[i]);
            }
            if(Da[i]>M) Da[i]=M;
        }

        // Collect the active set (samples with nonzero weight).
        trueSVs=0;
        for(i=0;i<dataset.l;i++){
            if(Da[i]!=0.0){
                indKSCA[trueSVs]=i;
                ++trueSVs;
            }
        }

        // Rebuild the weighted kernel rows for the active set.
        #pragma omp parallel for
        for (i=0;i<trueSVs;i++){
            int j = 0;
            for (j=0;j<props.size;j++){
                KSCA[i*(props.size)+j]=sqrt(Da[indKSCA[i]])*KSC[indKSCA[i]*(props.size)+j];
            }
            Day[i]=sqrt(Da[indKSCA[i]])*dataset.y[indKSCA[i]];
        }

        ++iter;
        if(props.verbose==1) printf("Iteration %d, nSVs %d, ||deltaW||^2/||W||^2=%f\n",iter,trueSVs,deltaW/normW);

        // Shrink M if the iteration diverged sharply; track the best
        // weight vector seen so far.
        if(iter>10 && deltaW/normW>100*oldnorm) M=M/10.0;
        oldnorm=deltaW/normW;
        if(deltaW/normW<bestDW){
            bestDW=deltaW/normW;
            itersSinceBestDW=0;
            memcpy(betaBest,betaNew,(props.size)*sizeof(double));
        }else{
            itersSinceBestDW+=1;
        }
    }

    free(KC);
    free(KSC);
    free(KSCA);
    free(Da);
    free(Day);
    free(K1);
    free(K2);
    free(beta);
    free(betaNew);
    free(e);
    free(indKSCA);
    // Caller owns betaBest (props.size doubles).
    return betaBest;
}
/**
* @brief It converts the result into a model struct.
*
* After the training of a budgeted SVM using the parallel IRWLS procedure, this function build a struct with the information and returns it.
*
* @param props The training parameters.
* @param dataset The training set.
* @param centroids of the selected centroids by the SGMA algorithm.
* @param beta The weights of every centroid obtained with the IRWLS algorithm.
* @return The struct that storages all the information of the classifier.
*/
model calculateBudgetedModel(properties props, svm_dataset dataset, int *centroids, double * beta ){

    model classifier;
    int c;
    svm_sample *src;
    svm_sample *dst;

    /* Copy the scalar model parameters. */
    classifier.Kgamma = props.Kgamma;
    classifier.sparse = dataset.sparse;
    classifier.maxdim = dataset.maxdim;
    classifier.nSVs = props.size;
    classifier.bias=0.0;
    classifier.kernelType = props.kernelType;

    /* First pass: count every feature of every centroid, including the
       index==-1 terminator that closes each sparse sample. */
    int total = 0;
    for (c = 0; c < props.size; c++){
        src = dataset.x[centroids[c]];
        while (src->index != -1){
            ++src;
            ++total;
        }
        ++total;  /* the terminator itself */
    }
    classifier.nElem = total;

    classifier.weights = (double *) calloc(props.size,sizeof(double));
    memcpy(classifier.weights,beta,props.size*sizeof(double));
    classifier.quadratic_value = (double *) calloc(props.size,sizeof(double));
    classifier.x = (svm_sample **) calloc(props.size,sizeof(svm_sample *));
    classifier.features = (svm_sample *) calloc(total,sizeof(svm_sample));

    /* Second pass: copy each centroid's sparse features into the single
       contiguous feature buffer and point classifier.x at each start. */
    int cursor = 0;
    for (c = 0; c < props.size; c++){
        classifier.quadratic_value[c]=dataset.quadratic_value[centroids[c]];
        classifier.x[c] = &classifier.features[cursor];
        src = dataset.x[centroids[c]];
        dst = classifier.x[c];
        while (src->index != -1){
            dst->index = src->index;
            dst->value = src->value;
            ++dst;
            ++src;
            ++cursor;
        }
        dst->index = src->index;  /* copy the -1 terminator */
        ++cursor;
    }
    return classifier;
}
/**
* @brief It parses input command line to extract the parameters of the budgeted algorithm.
*
* It parses input command line to extract the parameters.
* @param argc The number of words of the command line.
* @param argv The list of words of the command line.
* @return A struct that contains the values of the training parameters of the budgeted algorithm.
*/
properties parseTrainParameters(int* argc, char*** argv) {

    properties props;
    int i, j;

    /* Default training parameters. */
    props.Kgamma = 1.0;
    props.C = 1.0;
    props.Threads = 1;
    props.MaxSize = 500;
    props.Eta = 0.001;
    props.size = 10;
    props.algorithm = 1;
    props.kernelType = 1;
    props.file = 1;
    props.separator = ",";
    props.verbose = 1;

    /* Walk "-x value" pairs until the first positional argument. */
    for (i = 1; i < *argc; ++i) {
        if ((*argv)[i][0] != '-') break;
        if (++i >= *argc) {
            /* Flag without a value: show usage and abort. */
            printBudgetedInstructions();
            exit(1);
        }
        char* name = &(*argv)[i-1][1];
        char* value = (*argv)[i];
        if (strcmp(name, "g") == 0) {
            props.Kgamma = atof(value);
        } else if (strcmp(name, "c") == 0) {
            props.C = atof(value);
        } else if (strcmp(name, "e") == 0) {
            props.Eta = atof(value);
        } else if (strcmp(name, "k") == 0) {
            props.kernelType = atoi(value);
        } else if (strcmp(name, "t") == 0) {
            props.Threads = atoi(value);
        } else if (strcmp(name, "w") == 0) {
            props.MaxSize = atoi(value);
        } else if (strcmp(name, "s") == 0) {
            props.size = atoi(value);
        } else if (strcmp(name, "a") == 0) {
            props.algorithm = atoi(value);
        } else if (strcmp(name, "f") == 0) {
            props.file = atoi(value);
        } else if (strcmp(name, "p") == 0) {
            props.separator = value;
        } else if (strcmp(name, "v") == 0) {
            props.verbose = atoi(value);
        } else {
            fprintf(stderr, "Unknown parameter %s\n",name);
            printBudgetedInstructions();
            exit(2);
        }
    }

    /* Shift the positional arguments down so they follow argv[0]. */
    for (j = 1; i + j - 1 < *argc; ++j) {
        (*argv)[j] = (*argv)[i + j - 1];
    }
    *argc -= i - 1;

    return props;
}
/**
* @brief Print Instructios.
*
* Print the budgeted-train command line instructions in the standard output.
*/
/* Prints command-line usage to stderr.  FIXES: user-facing typos
   ("train" -> "trains", "generages" -> "generates", "futures" ->
   "future"), and the documented -s default is now 10, matching the
   actual default set in parseTrainParameters (props.size = 10). */
void printBudgetedInstructions(void) {
    fprintf(stderr, "budgeted-train: This software trains the sparse SVM on the given training set ");
    fprintf(stderr, "and generates a model for future prediction use.\n\n");
    fprintf(stderr, "Usage: budgeted-train [options] training_set_file model_file\n\n");
    fprintf(stderr, "Options:\n");
    fprintf(stderr, "  -k kernel type: (default 1)\n");
    fprintf(stderr, "       0 -- Linear kernel u'*v\n");
    fprintf(stderr, "       1 -- radial basis function: exp(-gamma*|u-v|^2)\n");
    fprintf(stderr, "  -g gamma: set gamma in radial basis kernel function (default 1)\n");
    fprintf(stderr, "       radial basis K(u,v)= exp(-gamma*|u-v|^2)\n");
    fprintf(stderr, "  -c Cost: set SVM Cost (default 1)\n");
    fprintf(stderr, "  -t Threads: Number of threads (default 1)\n");
    fprintf(stderr, "  -s Classifier size: Size of the classifier (default 10)\n");
    fprintf(stderr, "  -a Algorithm: Algorithm for centroids selection (default 1)\n");
    fprintf(stderr, "       0 -- Random Selection\n");
    fprintf(stderr, "       1 -- SGMA (Sparse Greedy Matrix Approximation)\n");
    fprintf(stderr, "  -f file format: (default 1)\n");
    fprintf(stderr, "       0 -- CSV format (comma separator)\n");
    fprintf(stderr, "       1 -- libsvm format\n");
    fprintf(stderr, "  -p separator: csv separator character (default \",\" if csv format is selected)\n");
    fprintf(stderr, "  -v verbose: (default 1)\n");
    fprintf(stderr, "       0 -- No screen messages\n");
    fprintf(stderr, "       1 -- Screen messages\n");
}
/**
* @endcond
*/
|
GB_unop__minv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__minv_int8_int8
// op(A') function: GB_unop_tran__minv_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = minv (Ax): apply the int8 MINV unary operator (GB_IMINV_SIGNED) to
// every entry of a full or bitmap array. No typecasting is needed (both
// sides are int8_t).
GrB_Info GB_unop_apply__minv_int8_int8
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz, // number of positions to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was compiled out via GB_control.h
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full case: every position 0..anz-1 holds a live entry
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with identical types: a parallel memcpy suffices
// (dead branch here: GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0 for minv)
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 8) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions whose bitmap bit is clear (entry not present)
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose A and apply the int8 MINV operator. All of the
// work is done by the shared GB_unop_transpose.c template, which is
// specialized for this operator/type pair by the GB_* macros defined above.
GrB_Info GB_unop_tran__minv_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces, // per-task workspaces for the template
const int64_t *GB_RESTRICT A_slice, // how A is sliced across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out via GB_control.h
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
relu_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: bhu@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "relu_param.h"
/* Reference fp32 ReLU / leaky-ReLU over an NCHW tensor.
 * negative_slope == 0 gives plain ReLU (negatives clamped to 0); any other
 * slope scales negative inputs by that factor (leaky ReLU). Channels are
 * processed in parallel with OpenMP. Always returns 0. */
static int ref_relu_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
                         int num_thread)
{
    int batch = input_tensor->dims[0];
    int channels = input_tensor->dims[1];
    int plane = input_tensor->dims[2] * input_tensor->dims[3]; /* h * w */
    int batch_step = channels * plane;
    float* in_base = input_tensor->data;
    float* out_base = output_tensor->data;

    if (negative_slope == 0)
    {
        for (int n = 0; n < batch; n++)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int c = 0; c < channels; c++)
            {
                const float* x = in_base + n * batch_step + c * plane;
                float* y = out_base + n * batch_step + c * plane;
                for (int i = 0; i < plane; i++)
                    y[i] = (x[i] < 0) ? 0 : x[i];
            }
        }
    }
    else
    {
        for (int n = 0; n < batch; n++)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int c = 0; c < channels; c++)
            {
                const float* x = in_base + n * batch_step + c * plane;
                float* y = out_base + n * batch_step + c * plane;
                for (int i = 0; i < plane; i++)
                    y[i] = (x[i] < 0) ? x[i] * negative_slope : x[i];
            }
        }
    }

    return 0;
}
static int ref_relu_uint8(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
int num_thread)
{
int batch = input_tensor->dims[0];
int channels = input_tensor->dims[1];
int h = input_tensor->dims[2];
int w = input_tensor->dims[3];
int size = h * w;
int c_step = h * w;
int batch_step = channels * c_step;
int total_size = batch * batch_step;
/* dequant */
uint8_t* input_uint8 = input_tensor->data;
uint8_t* output_uint8 = output_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
int32_t input_zero = input_tensor->zero_point;
int32_t output_zero = output_tensor->zero_point;
float* data_fp32 = (float*)sys_malloc(total_size * sizeof(float));
for(int i=0; i<total_size; i++)
{
data_fp32[i] = ((float )input_uint8[i] - (float )input_zero) * input_scale;
}
/* process */
if (negative_slope == 0)
{
for (int n = 0; n < batch; n++)
{
//#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = data_fp32 + batch_step * n + c_step * q;
float* dst = data_fp32 + batch_step * n + c_step * q;
for (int i = 0; i < size; i++)
{
if (src[i] < 0)
dst[i] = 0;
else
dst[i] = src[i];
}
}
}
}
else
{
for (int n = 0; n < batch; n++)
{
//#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = data_fp32 + batch_step * n + c_step * q;
float* dst = data_fp32 + batch_step * n + c_step * q;
for (int i = 0; i < size; i++)
{
if (src[i] < 0)
dst[i] = src[i] * negative_slope;
else
dst[i] = src[i];
}
}
}
}
/* quant */
for(int i=0; i<total_size; i++)
{
int udata = round(data_fp32[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
sys_free(data_fp32);
return 0;
}
/* Per-node setup hook: the reference ReLU needs no per-node state, so this
   is a no-op that reports success. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Per-node teardown hook: nothing was allocated in init_node, so there is
   nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Execute the ReLU node: fetch its input/output tensors from the graph and
 * dispatch to the fp32 or uint8 reference kernel based on the input dtype.
 * Returns the kernel's status. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* node = exec_node->ir_node;
    struct ir_graph* graph = node->graph;

    struct ir_tensor* in = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct ir_tensor* out = get_ir_graph_tensor(graph, node->output_tensors[0]);
    struct relu_param* param = ( struct relu_param* )node->op.param_mem;

    if (in->data_type == TENGINE_DT_FP32)
        return ref_relu_fp32(in, out, param->negative_slope, exec_graph->num_thread);

    return ref_relu_uint8(in, out, param->negative_slope, exec_graph->num_thread);
}
/* Shape inference for ReLU: the output tensor's shape is a straight copy of
 * the input tensor's shape. Returns the status of set_ir_tensor_shape. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_graph* graph = exec_node->ir_node->graph;
    struct ir_tensor* in = get_ir_graph_tensor(graph, exec_node->ir_node->input_tensors[0]);
    struct ir_tensor* out = get_ir_graph_tensor(graph, exec_node->ir_node->output_tensors[0]);

    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Backend selection priority: the reference implementation can always run,
   so it reports the baseline CANDO score (a tuned backend would outscore
   it). */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_CANDO;
}
/* Callback table registered for OP_RELU: run/reshape do the work, the
   init/release hooks are no-ops, and prerun/postrun are unused. */
static struct node_ops hcl_node_ops = {.prerun = NULL,
.run = run,
.reshape = reshape,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Module-load hook: register this table as the builtin OP_RELU
   implementation. */
static int reg_relu_hcl_ops(void* arg)
{
return register_builtin_node_ops(OP_RELU, &hcl_node_ops);
}
/* Module-unload hook: remove the OP_RELU registration added above. */
static int unreg_relu_hcl_ops(void* arg)
{
return unregister_builtin_node_ops(OP_RELU, &hcl_node_ops);
}
AUTO_REGISTER_OPS(reg_relu_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_relu_hcl_ops);
|
matmul_double.c | /*
* Square matrix multiplication
* A[N][N] * B[N][N] = C[N][N]
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N 512
//#define N 16
// read timer in second
/* Return wall-clock time in seconds (with sub-second resolution) as a
 * double. Fix: replaces the obsolete ftime()/<sys/timeb.h> interface
 * (removed from POSIX.1-2008) with C11's standard timespec_get, which also
 * improves resolution from milliseconds to nanoseconds. */
double read_timer() {
    struct timespec ts;
    if (timespec_get(&ts, TIME_UTC) != TIME_UTC)
        return 0.0; /* clock unavailable; callers only use differences */
    return (double) ts.tv_sec + (double) ts.tv_nsec / 1.0e9;
}
/* Fill an N x N matrix with uniform random doubles in [0, 10). */
void init(double **A) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            A[row][col] = (double)rand()/(double)(RAND_MAX/10.0);
        }
    }
}
/* C = A * B with B passed pre-transposed (B[j][k] addresses row j of B^T),
 * so both operands stream row-major; the inner dot product is vectorized.
 * Fix: the scalar accumulator is now named in a reduction clause -- under
 * `#pragma omp simd` an unannotated cross-iteration update of `temp` is
 * non-conforming (the OpenMP spec requires such accumulators to appear in
 * a reduction clause; without it the vectorized result is unspecified). */
void matmul_simd(double **A, double **B, double **C) {
    int i, j, k;
    double temp;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            temp = 0;
#pragma omp simd reduction(+:temp)
            for (k = 0; k < N; k++) {
                temp = temp + A[i][k] * B[j][k];
            }
            C[i][j] = temp;
        }
    }
}
// Debug functions
/* Debug aid: print the top-left 8x8 corner of a matrix, one bracketed row
 * per line, followed by a blank line. */
void print_matrix(double **matrix) {
    int row, col;
    for (row = 0; row < 8; row++) {
        printf("[");
        for (col = 0; col < 8; col++)
            printf("%.2f ", matrix[row][col]);
        puts("]");
    }
    puts("");
}
/* Scalar reference: C = A * B with B passed pre-transposed (B[j][k]
 * addresses row j of B^T). Same accumulation order as the original, so
 * results are bit-identical. */
void matmul_serial(double **A, double **B, double **C) {
    for (int row = 0; row < N; row++) {
        const double *a = A[row];
        for (int col = 0; col < N; col++) {
            const double *b = B[col];
            double acc = 0;
            for (int k = 0; k < N; k++)
                acc += a[k] * b[k];
            C[row][col] = acc;
        }
    }
}
/* Sum of absolute element-wise differences between A and B; a result near 0
 * means the matrices agree. Fix: the original summed *signed* differences,
 * so positive and negative errors could cancel and report ~0 for matrices
 * that actually differ -- fabs makes the check reliable. */
double check(double **A, double **B){
    double difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            difference += fabs(A[i][j] - B[i][j]);
        }
    }
    return difference;
}
// Main
/* Allocate an N x N matrix of doubles; exits on allocation failure
   (the original never checked malloc's result). */
static double **alloc_matrix(void) {
    double **m = malloc(sizeof(double *) * N);
    if (m == NULL) {
        fprintf(stderr, "out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < N; i++) {
        m[i] = malloc(sizeof(double) * N);
        if (m[i] == NULL) {
            fprintf(stderr, "out of memory\n");
            exit(EXIT_FAILURE);
        }
    }
    return m;
}

/* Free a matrix allocated by alloc_matrix. */
static void free_matrix(double **m) {
    for (int i = 0; i < N; i++)
        free(m[i]);
    free(m);
}

/* Benchmark driver: times num_runs multiplications of random N x N matrices
 * with the simd and serial kernels, prints runtimes/GFLOPS, and cross-checks
 * the two results. Fixes over the original: malloc results are checked and
 * every matrix is freed before exit. */
int main(int argc, char *argv[]) {
    /* Set everything up */
    double **A = alloc_matrix();
    double **B = alloc_matrix();
    double **C_simd = alloc_matrix();
    double **C_serial = alloc_matrix();
    double **BT = alloc_matrix(); /* B transposed: both kernels index B[j][k] */

    srand(time(NULL));
    init(A);
    init(B);
    for (int line = 0; line < N; line++) {
        for (int col = 0; col < N; col++) {
            BT[line][col] = B[col][line];
        }
    }

    int i;
    int num_runs = 10;

    double elapsed = read_timer();
    for (i = 0; i < num_runs; i++)
        matmul_simd(A, BT, C_simd);
    elapsed = (read_timer() - elapsed);

    double elapsed_serial = read_timer();
    for (i = 0; i < num_runs; i++)
        matmul_serial(A, BT, C_serial);
    elapsed_serial = (read_timer() - elapsed_serial);

    print_matrix(A);
    print_matrix(BT);
    puts("=\n");
    print_matrix(C_simd);
    puts("---------------------------------");
    print_matrix(C_serial);

    /* 2*N^3 floating-point ops per multiplication */
    double gflops_omp = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed));
    double gflops_serial = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed_serial));
    printf("======================================================================================================\n");
    printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_omp:\t\t%4f\t%4f\n", elapsed, gflops_omp);
    printf("matmul_serial:\t\t%4f\t%4f\n", elapsed_serial, gflops_serial);
    printf("Correctness check: %f\n", check(C_simd, C_serial));

    free_matrix(A);
    free_matrix(B);
    free_matrix(C_simd);
    free_matrix(C_serial);
    free_matrix(BT);
    return 0;
}
|
compute_sentinel_refl.c | /******************************************************************************
FILE: compute_sentinel_refl.c
PURPOSE: Contains functions for handling the Sentinel-2 TOA reflectance and
surface reflectance corrections.
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
LICENSE TYPE: NASA Open Source Agreement Version 1.3
NOTES:
******************************************************************************/
//#define USE_GCTP 1
/* GAIL uncomment to use the GCTP library */
#include "lasrc.h"
#include "time.h"
#include "aero_interp.h"
#include "poly_coeff.h"
#include "read_level1_qa.h"
#include "read_level2_qa.h"
#ifndef USE_GCTP
#include "utmtodeg.h"
#endif
#define WRITE_TAERO 1
/******************************************************************************
MODULE: read_sentinel_toa_refl
PURPOSE: Reads the input TOA Sentinel reflectance bands and converts all bands
to 10m resolution.
RETURN VALUE:
Type = int
Value Description
----- -----------
ERROR Error reading the input TOA reflectance
SUCCESS No errors encountered
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
NOTES:
******************************************************************************/
/* Reads the input TOA Sentinel-2 reflectance bands, converts the 20m and 60m
   bands to 10m resolution, and unscales each band into toaband. Fix: the
   three temporary band buffers are now released on every error path via a
   single cleanup label (the original leaked them on each error return after
   allocation). Returns SUCCESS or ERROR. */
int read_sentinel_toa_refl
(
    Input_t *input,    /* I: input structure for the Sentinel product */
    Espa_internal_meta_t *xml_metadata,
                       /* I: XML metadata structure */
    float **toaband    /* O: output TOA reflectance values (unscaled) */
)
{
    char errmsg[STR_SIZE];   /* error message */
    char FUNC_NAME[] = "read_sentinel_toa_refl";  /* function name */
    int retstatus = ERROR;   /* return status; set to SUCCESS only after all
                                bands have been read and unscaled */
    int i;                   /* looping variable for pixels */
    int ib;                  /* looping variable for input bands */
    int nlines10 = -99;      /* number of lines in 10m reflectance bands */
    int nsamps10 = -99;      /* number of samps in 10m reflectance bands */
    int nlines20 = -99;      /* number of lines in 20m reflectance bands */
    int nsamps20 = -99;      /* number of samps in 20m reflectance bands */
    int nlines60 = -99;      /* number of lines in 60m reflectance bands */
    int nsamps60 = -99;      /* number of samps in 60m reflectance bands */
    uint16 *tmp_band = NULL;   /* input 10m image data for a single band,
                                  nlines10 x nsamps10 */
    uint16 *tmp20_band = NULL; /* input 20m image data for a single band,
                                  nlines20 x nsamps20 */
    uint16 *tmp60_band = NULL; /* input 60m image data for a single band,
                                  nlines60 x nsamps60 */
    Espa_band_meta_t *bmeta = xml_metadata->band;
                             /* pointer to the array of band metadata */

    /* Determine the 10m, 20m, and 60m number of lines and samples from one
       representative band of each resolution */
    for (ib = 0; ib < xml_metadata->nbands; ib++)
    {
        /* Use band 2 for the representative 10m band */
        if (!strcmp (xml_metadata->band[ib].name, "B02"))
        {
            nlines10 = xml_metadata->band[ib].nlines;
            nsamps10 = xml_metadata->band[ib].nsamps;
        }

        /* Use band 5 for the representative 20m band */
        else if (!strcmp (xml_metadata->band[ib].name, "B05"))
        {
            nlines20 = xml_metadata->band[ib].nlines;
            nsamps20 = xml_metadata->band[ib].nsamps;
        }

        /* Use band 1 for the representative 60m band */
        else if (!strcmp (xml_metadata->band[ib].name, "B01"))
        {
            nlines60 = xml_metadata->band[ib].nlines;
            nsamps60 = xml_metadata->band[ib].nsamps;
        }
    }

    /* Make sure they were found and are valid */
    if (nlines10 == -99 || nsamps10 == -99)
    {
        sprintf (errmsg, "Error obtaining the nlines/nsamps for 10m band");
        error_handler (true, FUNC_NAME, errmsg);
        goto cleanup;
    }

    if (nlines20 == -99 || nsamps20 == -99)
    {
        sprintf (errmsg, "Error obtaining the nlines/nsamps for 20m band");
        error_handler (true, FUNC_NAME, errmsg);
        goto cleanup;
    }

    if (nlines60 == -99 || nsamps60 == -99)
    {
        sprintf (errmsg, "Error obtaining the nlines/nsamps for 60m band");
        error_handler (true, FUNC_NAME, errmsg);
        goto cleanup;
    }

    /* Allocate memory for the 10m, 20m, and 60m band buffers */
    tmp_band = calloc (nlines10 * nsamps10, sizeof (uint16));
    if (tmp_band == NULL)
    {
        sprintf (errmsg, "Error allocating memory for temporary 10m band");
        error_handler (true, FUNC_NAME, errmsg);
        goto cleanup;
    }

    tmp20_band = calloc (nlines20 * nsamps20, sizeof (uint16));
    if (tmp20_band == NULL)
    {
        sprintf (errmsg, "Error allocating memory for temporary 20m band");
        error_handler (true, FUNC_NAME, errmsg);
        goto cleanup;
    }

    tmp60_band = calloc (nlines60 * nsamps60, sizeof (uint16));
    if (tmp60_band == NULL)
    {
        sprintf (errmsg, "Error allocating memory for temporary 60m band");
        error_handler (true, FUNC_NAME, errmsg);
        goto cleanup;
    }

    /* Loop through the Sentinel-2 bands */
    for (ib = DNS_BAND1; ib <= DNS_BAND12; ib++)
    {
        switch (ib)
        {
            /* 10m bands read as-is (4) */
            case DNS_BAND2:
            case DNS_BAND3:
            case DNS_BAND4:
            case DNS_BAND8:
                /* Read the input band data */
                if (get_input_refl_lines (input, ib, 0, nlines10, nsamps10,
                    tmp_band) != SUCCESS)
                {
                    sprintf (errmsg, "Error reading Sentinel TOA 10m band %d",
                        ib+1);
                    error_handler (true, FUNC_NAME, errmsg);
                    goto cleanup;
                }
                break;

            /* 20m bands convert to 10m (6) */
            case DNS_BAND5:
            case DNS_BAND6:
            case DNS_BAND7:
            case DNS_BAND8A:
            case DNS_BAND11:
            case DNS_BAND12:
                /* Read the input band data */
                if (get_input_refl_lines (input, ib, 0, nlines20, nsamps20,
                    tmp20_band) != SUCCESS)
                {
                    sprintf (errmsg, "Error reading Sentinel TOA 20m band %d",
                        ib+1);
                    error_handler (true, FUNC_NAME, errmsg);
                    goto cleanup;
                }

                /* Convert to 10m */
                if (convert_to_10m (nlines20, nsamps20, nlines10, nsamps10,
                    tmp20_band, tmp_band) != SUCCESS)
                {
                    sprintf (errmsg, "Error converting 20m band %d", ib+1);
                    error_handler (true, FUNC_NAME, errmsg);
                    goto cleanup;
                }
                break;

            /* 60m bands convert to 10m (3, but skipping bands 9&10) */
            case DNS_BAND1:
#ifdef PROC_ALL_BANDS
            case DNS_BAND9:
            case DNS_BAND10:
#endif
                /* Read the input band data */
                if (get_input_refl_lines (input, ib, 0, nlines60, nsamps60,
                    tmp60_band) != SUCCESS)
                {
                    sprintf (errmsg, "Error reading Sentinel TOA 60m band %d",
                        ib+1);
                    error_handler (true, FUNC_NAME, errmsg);
                    goto cleanup;
                }

                /* Convert to 10m */
                if (convert_to_10m (nlines60, nsamps60, nlines10, nsamps10,
                    tmp60_band, tmp_band) != SUCCESS)
                {
                    sprintf (errmsg, "Error converting 60m band %d", ib+1);
                    error_handler (true, FUNC_NAME, errmsg);
                    goto cleanup;
                }
                break;

            /* NOTE(review): a band index with no matching case (bands 9/10
               when PROC_ALL_BANDS is off) would reach the unscale loop below
               with the previous band's data still in tmp_band -- confirm
               those indices are excluded from the DNS_BAND1..DNS_BAND12
               range in that configuration */
        } /* switch ib */

        /* Unscale the data */
        for (i = 0; i < nlines10 * nsamps10; i++)
        {
            /* If this is fill, leave the value as-is for masking later. O/W
               unscale the data. */
            if (tmp_band[i] == bmeta[ib].fill_value)
                toaband[ib][i] = tmp_band[i];
            else
                toaband[ib][i] = tmp_band[i] * bmeta[ib].scale_factor +
                    bmeta[ib].add_offset;
        }
    } /* for ib */

    /* Every band was read and unscaled */
    retstatus = SUCCESS;

cleanup:
    /* Free the temporary buffers on both success and error paths; free(NULL)
       is a no-op for buffers not yet allocated */
    free (tmp_band);
    free (tmp20_band);
    free (tmp60_band);
    return (retstatus);
}
/******************************************************************************
MODULE: compute_sentinel_sr_refl
PURPOSE: Computes the surface reflectance for all the Sentinel-2 reflectance
bands.
RETURN VALUE:
Type = int
Value Description
----- -----------
ERROR Error computing the reflectance
SUCCESS No errors encountered
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
NOTES:
1. Initializes the variables and data arrays from the lookup table and
auxiliary files.
2. The tauray array was originally read in from a static ASCII file, but it is
now hardcoded to save time from reading the file each time. This file was
generated (like many of the other auxiliary input tables) by running 6S and
storing the coefficients.
4. Aerosols are retrieved for all non-fill pixels. If the aerosol fails the
model residual or NDVI test, then the pixel is flagged as water. All water
pixels are run through a water-specific aerosol retrieval. If the model
residual fails, then that pixel is marked as failed aerosol retrieval. Any
pixel that failed retrieval is then interpolated using an average of the
clear (valid land pixel aerosols) and water (valid water pixel aerosols).
Those final aerosol values are used for the surface reflectance corrections.
5. Cloud-based QA information is not processed in this algorithm.
******************************************************************************/
int compute_sentinel_sr_refl
(
Input_t *input, /* I: input structure for the Landsat product */
Espa_internal_meta_t *xml_metadata,
/* I: XML metadata structure */
char *xml_infile, /* I: input XML filename */
uint16 *qaband, /* O: QA band generated for image, nlines x nsamps */
int nlines, /* I: number of lines in reflectance, thermal bands */
int nsamps, /* I: number of samps in reflectance, thermal bands */
float pixsize, /* I: pixel size for the reflectance bands */
float **toaband, /* I: unscaled TOA reflectance bands, nlines x nsamps */
float **sband, /* O: output unscaled SR bands, nlines x nsamps */
uint16 *out_band, /* I: allocated array for writing scaled output */
float xts, /* I: scene center solar zenith angle (deg) */
float xmus, /* I: cosine of solar zenith angle */
bool use_orig_aero, /* I: use the original aerosol handling if specified,
o/w use the semi-empirical approach */
char *anglehdf, /* I: angle HDF filename */
char *intrefnm, /* I: intrinsic reflectance filename */
char *transmnm, /* I: transmission filename */
char *spheranm, /* I: spherical albedo filename */
char *cmgdemnm, /* I: climate modeling grid DEM filename */
char *rationm, /* I: ratio averages filename */
char *auxnm /* I: auxiliary filename for ozone and water vapor */
)
{
char errmsg[STR_SIZE]; /* error message */
char FUNC_NAME[] = "compute_sentinel_sr_refl"; /* function name */
Sat_t sat = input->meta.sat; /* satellite */
int retval; /* return status */
int i, j; /* looping variable for pixels */
int ib; /* looping variable for input bands */
int iband; /* current band */
int curr_pix; /* current pixel in 1D arrays of nlines x nsamps */
int iline; /* current line in the 6x6 window for atm corr */
int isamp; /* current sample in the 6x6 window for atm corr */
int ew_line; /* ending line in the 6x6 window for atm corr */
int ew_samp; /* ending sample in the 6x6 window for atm corr */
int curr_win_pix; /* current pixel in the 6x6 window for atm corr */
int pix_count; /* count of valid pixels in the 5x5 window */
long npixels; /* number of pixels to process */
bool is_fill; /* flag for whether the current pixel is fill */
float tmpf; /* temporary floating point value */
float rotoa; /* top of atmosphere reflectance */
float roslamb; /* lambertian surface reflectance */
float tgo; /* other gaseous transmittance (tgog * tgoz) */
float roatm; /* intrinsic atmospheric reflectance */
float ttatmg; /* total atmospheric transmission */
float satm; /* atmosphere spherical albedo */
float tgo_x_roatm; /* variable for tgo * roatm */
float tgo_x_ttatmg; /* variable for tgo * ttatmg */
float xrorayp; /* reflectance of the atmosphere due to molecular
(Rayleigh) scattering */
float erelc[NSR_BANDS]; /* band ratio variable for refl bands */
float troatm[NSR_BANDS]; /* atmospheric reflectance table for refl bands */
int iband1; /* band index (zero-based) */
float raot; /* AOT reflectance */
/* raot values for three different eps values */
float residual; /* model residual */
float residual1, residual2, residual3;
/* residuals for 3 different eps values */
float rsurf; /* surface reflectance */
float corf; /* aerosol impact (higher values represent high
aerosol) */
float ros1,ros4,ros5; /* surface reflectance for bands 1, 4, and 5 */
#ifndef _OPENMP
int curr_tmp_percent; /* percentage for current line */
int tmp_percent; /* current percentage for printing status */
#endif
float lat, lon; /* pixel lat, long location */
int lcmg, scmg; /* line/sample index for the CMG */
int lcmg1, scmg1; /* line+1/sample+1 index for the CMG */
float u, v; /* line/sample index for the CMG */
float one_minus_u; /* 1.0 - u */
float one_minus_v; /* 1.0 - v */
float one_minus_u_x_one_minus_v; /* (1.0 - u) * (1.0 - v) */
float one_minus_u_x_v; /* (1.0 - u) * v */
float u_x_one_minus_v; /* u * (1.0 - v) */
float u_x_v; /* u * v */
float ndwi_th1, ndwi_th2; /* values for NDWI calculations */
float xcmg, ycmg; /* x/y location for CMG */
float xndwi; /* calculated NDWI value */
int uoz11, uoz21, uoz12, uoz22; /* ozone at line,samp; line, samp+1;
line+1, samp; and line+1, samp+1 */
float pres11, pres12, pres21, pres22; /* pressure at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float wv11, wv12, wv21, wv22; /* water vapor at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
uint8 *ipflag = NULL; /* QA flag to assist with aerosol interpolation,
nlines x nsamps */
float *twvi = NULL; /* interpolated water vapor value,
nlines x nsamps */
float *tozi = NULL; /* interpolated ozone value, nlines x nsamps */
float *tp = NULL; /* interpolated pressure value, nlines x nsamps */
float *taero = NULL; /* aerosol values for each pixel, nlines x nsamps */
float *teps = NULL; /* angstrom coeff for each pixel, nlines x nsamps */
Espa_band_meta_t *bmeta = xml_metadata->band;
/* pointer to the array of band metadata */
/* Vars for forward/inverse mapping space */
Geoloc_t *space = NULL; /* structure for geolocation information */
Space_def_t space_def; /* structure to define the space mapping */
#ifdef USE_GCTP
Img_coord_float_t img; /* coordinate in line/sample space */
Geo_coord_t geo; /* coordinate in lat/long space */
#endif
/* Lookup table variables */
float eps; /* angstrom coefficient */
float eps1, eps2, eps3; /* eps values for three runs */
float xtv; /* observation zenith angle (deg) */
float xmuv; /* cosine of observation zenith angle */
float xfi; /* azimuthal difference between the sun and
observation angle (deg) */
float cosxfi; /* cosine of azimuthal difference */
float xtsstep; /* solar zenith step value */
float xtsmin; /* minimum solar zenith value */
float xtvstep; /* observation step value */
float xtvmin; /* minimum observation value */
float *rolutt = NULL; /* intrinsic reflectance table
[NSR_BANDS x NPRES_VALS x NAOT_VALS x NSOLAR_VALS] */
float *transt = NULL; /* transmission table
[NSR_BANDS x NPRES_VALS x NAOT_VALS x NSUNANGLE_VALS] */
float *sphalbt = NULL; /* spherical albedo table
[NSR_BANDS x NPRES_VALS x NAOT_VALS] */
float *normext = NULL; /* aerosol extinction coefficient at the current
wavelength (normalized at 550nm)
[NSR_BANDS x NPRES_VALS x NAOT_VALS] */
float *tsmax = NULL; /* maximum scattering angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *tsmin = NULL; /* minimum scattering angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *nbfi = NULL; /* number of azimuth angles
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *nbfic = NULL; /* communitive number of azimuth angles
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *ttv = NULL; /* view angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float tts[22]; /* sun angle table */
int32 indts[22]; /* index for sun angle table */
int iaots; /* index for AOTs */
/* Atmospheric correction coefficient variables (semi-empirical approach) */
float tgo_arr[NREFL_BANDS]; /* per-band other gaseous transmittance */
float roatm_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for roatm */
float ttatmg_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for ttatmg */
float satm_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for satm */
float roatm_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for roatm */
float ttatmg_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for ttatmg */
float satm_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for satm */
float normext_p0a3_arr[NREFL_BANDS]; /* per band normext[iband][0][3] */
int roatm_iaMax[NREFL_BANDS];
int ia; /* looping variable for AOTs */
int iaMaxTemp; /* max temp for current AOT level */
/* Auxiliary file variables */
int16 *dem = NULL; /* CMG DEM data array [DEM_NBLAT x DEM_NBLON] */
int16 *andwi = NULL; /* avg NDWI [RATIO_NBLAT x RATIO_NBLON] */
int16 *sndwi = NULL; /* standard NDWI [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob1 = NULL; /* mean band1 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob2 = NULL; /* mean band2 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob7 = NULL; /* mean band7 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *intratiob1 = NULL; /* intercept band1 ratio,
RATIO_NBLAT x RATIO_NBLON */
int16 *intratiob2 = NULL; /* intercept band2 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *intratiob7 = NULL; /* intercept band7 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob1 = NULL; /* slope band1 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob2 = NULL; /* slope band2 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob7 = NULL; /* slope band7 ratio
RATIO_NBLAT x RATIO_NBLON */
uint16 *wv = NULL; /* water vapor values [CMG_NBLAT x CMG_NBLON] */
uint8 *oz = NULL; /* ozone values [CMG_NBLAT x CMG_NBLON] */
float raot550nm; /* nearest input value of AOT */
float uoz = 0.0; /* total column ozone */
float uwv = 0.0; /* total column water vapor (precipital water vapor) */
float pres = 0.0; /* surface pressure */
float rb1; /* band ratio 1 (unscaled) */
float rb2; /* band ratio 2 (unscaled) */
float slpr11, slpr12, slpr21, slpr22; /* band ratio slope at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float intr11, intr12, intr21, intr22; /* band ratio intercept at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float slprb1, slprb2, slprb7; /* interpolated band ratio slope values for
band ratios 1, 2, 7 */
float intrb1, intrb2, intrb7; /* interpolated band ratio intercept values
for band ratios 1, 2, 7 */
int ratio_pix11; /* pixel location for ratio products [lcmg][scmg] */
int ratio_pix12; /* pixel location for ratio products [lcmg][scmg+1] */
int ratio_pix21; /* pixel location for ratio products [lcmg+1][scmg] */
int ratio_pix22; /* pixel location for ratio products [lcmg+1][scmg+1] */
int cmg_pix11; /* pixel location for CMG/DEM products [lcmg][scmg] */
int cmg_pix12; /* pixel location for CMG/DEM products [lcmg][scmg+1] */
int cmg_pix21; /* pixel location for CMG/DEM products [lcmg+1][scmg] */
int cmg_pix22; /* pixel location for CMG/DEM products [lcmg+1][scmg+1] */
/* Variables for finding the eps that minimizes the residual */
double xa, xb, xc, xd, xe, xf; /* coefficients */
double coefa, coefb; /* coefficients */
float epsmin; /* eps which minimizes the residual */
float resepsmin; /* residual eps which minimizes residual */
/* Output file info */
time_t mytime; /* timing variable */
Output_t *sr_output = NULL; /* output structure and metadata for the SR
product */
Envi_header_t envi_hdr; /* output ENVI header information */
char envi_file[STR_SIZE]; /* ENVI filename */
char *cptr = NULL; /* pointer to the file extension */
/* Table constants */
float aot550nm[NAOT_VALS] = /* AOT look-up table */
{0.01, 0.05, 0.10, 0.15, 0.20, 0.30, 0.40, 0.60, 0.80, 1.00, 1.20,
1.40, 1.60, 1.80, 2.00, 2.30, 2.60, 3.00, 3.50, 4.00, 4.50, 5.00};
float tpres[NPRES_VALS] = /* surface pressure table */
{1050.0, 1013.0, 900.0, 800.0, 700.0, 600.0, 500.0};
/* Atmospheric correction variables */
/* Look up table for atmospheric and geometric quantities. Taurary comes
from tauray-ldcm/msi.ASC and the oz, wv, og variables come from
gascoef-modis/msi.ASC. */
/* NOTE: coefficients for bands 9 and 10 may have been removed from these
arrays since those bands might not be processed */
#ifdef PROC_ALL_BANDS
/* Process all bands if turned on */
float tauray[NSRS_BANDS] = /* molecular optical thickness
coefficients -- produced by running 6S */
{0.23432, 0.15106, 0.09102, 0.04535, 0.03584, 0.02924, 0.02338, 0.01847,
0.01560, 0.01092, 0.00243, 0.00128, 0.00037};
double oztransa[NSRS_BANDS] = /* ozone transmission coeff */
{-0.00264691, -0.0272572, -0.0986512, -0.0500348, -0.0204295,
-0.0108641, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001};
double wvtransa[NSRS_BANDS] = /* water vapor transmission coeff */
{2.29849e-27, 2.29849e-27, 0.000777307, 0.00361051, 0.0141249,
0.0137067, 0.00410217, 0.0285871, 0.000390755, 0.00001, 0.01,
0.000640155, 0.018006};
double wvtransb[NSRS_BANDS] = /* water vapor transmission coeff */
{0.999742, 0.999742, 0.891099, 0.754895, 0.75596, 0.763497, 0.74117,
0.578722, 0.900899, 0.45818, 1.0, 0.943712, 0.647517};
double ogtransa1[NSRS_BANDS] = /* other gases transmission coeff */
{4.91586e-20, 4.91586e-20, 4.91586e-20, 4.91586e-20, 5.3367e-06,
4.91586e-20, 9.03583e-05, 1.64109e-09, 1.90458e-05, 4.91586e-20,
7.62429e-06, 0.0212751, 0.0243065};
double ogtransb0[NSRS_BANDS] = /* other gases transmission coeff */
{0.000197019, 0.000197019, 0.000197019, 0.000197019, -0.980313,
0.000197019, 0.0265393, 1.E-10, 0.0322844, 0.000197019, 0.000197019,
0.000197019, 0.000197019};
double ogtransb1[NSRS_BANDS] = /* other gases transmission coeff */
{9.57011e-16, 9.57011e-16, 9.57011e-16, 9.57011e-16, 1.33639,
9.57011e-16, 0.0532256, 1.E-10, -0.0219907, 9.57011e-16, -0.216849,
0.0116062, 0.0604312};
#else
/* Skip bands 9 and 10 as default for ESPA */
float tauray[NSRS_BANDS] = /* molecular optical thickness
coefficients -- produced by running 6S */
{0.23432, 0.15106, 0.09102, 0.04535, 0.03584, 0.02924, 0.02338, 0.01847,
0.01560, 0.00128, 0.00037};
double oztransa[NSRS_BANDS] = /* ozone transmission coeff */
{-0.00264691, -0.0272572, -0.0986512, -0.0500348, -0.0204295,
-0.0108641, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001};
double wvtransa[NSRS_BANDS] = /* water vapor transmission coeff */
{2.29849e-27, 2.29849e-27, 0.000777307, 0.00361051, 0.0141249,
0.0137067, 0.00410217, 0.0285871, 0.000390755, 0.000640155, 0.018006};
double wvtransb[NSRS_BANDS] = /* water vapor transmission coeff */
{0.999742, 0.999742, 0.891099, 0.754895, 0.75596, 0.763497, 0.74117,
0.578722, 0.900899, 0.943712, 0.647517};
double ogtransa1[NSRS_BANDS] = /* other gases transmission coeff */
{4.91586e-20, 4.91586e-20, 4.91586e-20, 4.91586e-20, 5.3367e-06,
4.91586e-20, 9.03583e-05, 1.64109e-09, 1.90458e-05, 0.0212751,
0.0243065};
double ogtransb0[NSRS_BANDS] = /* other gases transmission coeff */
{0.000197019, 0.000197019, 0.000197019, 0.000197019, -0.980313,
0.000197019, 0.0265393, 1.E-10, 0.0322844, 0.000197019, 0.000197019};
double ogtransb1[NSRS_BANDS] = /* other gases transmission coeff */
{9.57011e-16, 9.57011e-16, 9.57011e-16, 9.57011e-16, 1.33639,
9.57011e-16, 0.0532256, 1.E-10, -0.0219907, 0.0116062, 0.0604312};
#endif
#ifdef WRITE_TAERO
FILE *aero_fptr=NULL; /* file pointer for aerosol files */
#endif
/* Start processing */
mytime = time(NULL);
printf ("Start surface reflectance corrections: %s", ctime(&mytime));
#ifdef PROC_ALL_BANDS
printf ("All Sentinel-2 bands will be processed, including bands 9 and 10, "
"which is not the default.\n");
#endif
/* Allocate memory for the many arrays needed to do the surface reflectance
computations */
npixels = nlines * nsamps;
retval = sentinel_memory_allocation_sr (nlines, nsamps, &ipflag, &twvi,
&tozi, &tp, &taero, &teps, &dem, &andwi, &sndwi, &ratiob1, &ratiob2,
&ratiob7, &intratiob1, &intratiob2, &intratiob7, &slpratiob1,
&slpratiob2, &slpratiob7, &wv, &oz, &rolutt, &transt, &sphalbt,
&normext, &tsmax, &tsmin, &nbfic, &nbfi, &ttv);
if (retval != SUCCESS)
{
sprintf (errmsg, "Error allocating memory for the data arrays needed "
"for Sentinel surface reflectance calculations.");
error_handler (false, FUNC_NAME, errmsg);
return (ERROR);
}
/* Initialize the geolocation space applications */
if (!get_geoloc_info (xml_metadata, &space_def))
{
sprintf (errmsg, "Getting the space definition from the XML file");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
#ifdef USE_GCTP
space = setup_mapping (&space_def);
if (space == NULL)
{
sprintf (errmsg, "Setting up the geolocation mapping");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
#endif
/* Initialize the look up tables and atmospheric correction variables.
view zenith initialized to scene center (xtv)
azimuthal difference between sun and obs angle initialize to difference
at scene center (xfi)
surface pressure is initialized to the pressure at the center of the
scene (using the DEM) (pres)
water vapor is initialized to the value at the center of the scene (uwv)
ozone is initialized to the value at the center of the scene (uoz) */
retval = init_sr_refl (nlines, nsamps, input, &space_def, space, anglehdf,
intrefnm, transmnm, spheranm, cmgdemnm, rationm, auxnm, &eps, &iaots,
&xtv, &xmuv, &xfi, &cosxfi, &raot550nm, &pres, &uoz, &uwv, &xtsstep,
&xtsmin, &xtvstep, &xtvmin, tsmax, tsmin, tts, ttv, indts, rolutt,
transt, sphalbt, normext, nbfic, nbfi, dem, andwi, sndwi, ratiob1,
ratiob2, ratiob7, intratiob1, intratiob2, intratiob7, slpratiob1,
slpratiob2, slpratiob7, wv, oz);
if (retval != SUCCESS)
{
sprintf (errmsg, "Error initializing the lookup tables and "
"atmospheric correction variables.");
error_handler (false, FUNC_NAME, errmsg);
return (ERROR);
}
/* Loop through all the reflectance bands and perform atmospheric
corrections based on climatology */
mytime = time(NULL);
printf ("Performing atmospheric corrections for each Sentinel reflectance "
"band ... %s", ctime(&mytime)); fflush(stdout);
/* Flag fill pixels as any pixel with all bands containing fill values.
This used to be flag as fill if any pixel is fill, but often the S2
values for non-visible bands are a value of 0. */
for (i = 0; i < npixels; i++)
{
/* Initialize to true and break out if any band is not fill */
is_fill = true;
for (ib = 0; ib <= SRS_BAND12; ib++)
{
if (toaband[ib][i] != bmeta[ib].fill_value)
{
/* No need to look any further */
is_fill = false;
break;
}
} /* end for ib */
/* If this is fill then mask it as such in the various outputs */
if (is_fill)
{
qaband[i] |= (1 << ESPA_L1_DESIGNATED_FILL_BIT);
for (ib = 0; ib <= SRS_BAND12; ib++)
sband[ib][i] = FILL_VALUE;
}
} /* for i */
/* rotoa is not defined for the atmcorlamb2 call, which is ok, but the
roslamb value is not valid upon output. Just set it to 0.0 to be
consistent. */
rotoa = 0.0;
raot550nm = 0.05;
eps = -1.0;
for (ib = 0; ib <= SRS_BAND12; ib++)
{
printf (" Band %s\n", SENTINEL_BANDNAME[ib]); fflush(stdout);
#ifdef PROC_ALL_BANDS
/* Process all bands if turned on */
/* Get the parameters for the atmospheric correction */
if (ib != SRS_BAND9) /* skip the water vapor band */
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv, xfi,
cosxfi, raot550nm, ib, pres, tpres, aot550nm, rolutt, transt,
xtsstep, xtsmin, xtvstep, xtvmin, sphalbt, normext, tsmax,
tsmin, nbfic, nbfi, tts, indts, ttv, uoz, uwv, tauray,
ogtransa1, ogtransb0, ogtransb1, wvtransa, wvtransb, oztransa,
rotoa, &roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian atmospheric correction "
"type 2.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
}
else
{
/* Use default values for band 9, water vapor band */
tgo = 1.0;
roatm = 0.0;
ttatmg = 1.0;
satm = 0.0;
}
#else
/* Skip bands 9 and 10 as default for ESPA */
/* Get the parameters for the atmospheric correction */
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv, xfi,
cosxfi, raot550nm, ib, pres, tpres, aot550nm, rolutt, transt,
xtsstep, xtsmin, xtvstep, xtvmin, sphalbt, normext, tsmax,
tsmin, nbfic, nbfi, tts, indts, ttv, uoz, uwv, tauray,
ogtransa1, ogtransb0, ogtransb1, wvtransa, wvtransb, oztransa,
rotoa, &roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian atmospheric correction "
"type 2.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
#endif
tgo_x_roatm = tgo * roatm;
tgo_x_ttatmg = tgo * ttatmg;
/* Perform atmospheric corrections for reflectance bands */
#ifdef _OPENMP
#pragma omp parallel for private (i, roslamb)
#endif
for (i = 0; i < npixels; i++)
{
/* If this pixel is not fill then handle the atmospheric
correction */
if (!level1_qa_is_fill (qaband[i]))
{
/* Apply the atmospheric corrections (ignoring the Rayleigh
scattering component and water vapor), and store the
scaled value for further corrections. (NOTE: the full
computations are in atmcorlamb2) */
roslamb = toaband[ib][i] - tgo_x_roatm;
roslamb /= tgo_x_ttatmg + satm * roslamb;
sband[ib][i] = roslamb;
}
} /* end for i */
} /* for ib */
printf ("\n");
/* Start the retrieval of atmospheric correction parameters for each band */
mytime = time(NULL);
printf ("Starting retrieval of atmospheric correction parameters ... %s",
ctime(&mytime)); fflush(stdout);
/* Get the coefficients for the semi-empirical atmospheric correction */
if (!use_orig_aero)
{
mytime = time(NULL);
printf ("Obtaining the coefficients for the semi-empirical approach "
"... %s", ctime(&mytime));
for (ib = 0; ib <= SRS_BAND12; ib++)
{
/* Get the parameters for the atmospheric correction */
/* rotoa is not defined for this call, which is ok, but the
roslamb value is not valid upon output. Just set it to 0.0 to
be consistent. */
normext_p0a3_arr[ib] = normext[ib * NPRES_VALS * NAOT_VALS + 0 + 3];
/* normext[ib][0][3]; */
rotoa = 0.0;
eps = -1.0;
for (ia = 0; ia < NAOT_VALS; ia++)
{
raot550nm = aot550nm[ia];
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv,
xfi, cosxfi, raot550nm, ib, pres, tpres, aot550nm, rolutt,
transt, xtsstep, xtsmin, xtvstep, xtvmin, sphalbt, normext,
tsmax, tsmin, nbfic, nbfi, tts, indts, ttv, uoz, uwv,
tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa, wvtransb,
oztransa, rotoa, &roslamb, &tgo, &roatm, &ttatmg, &satm,
&xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian atmospheric "
"correction type 2 for band %d.", ib);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
/* Store the AOT-related variables for use in the atmospheric
corrections */
roatm_arr[ib][ia] = roatm;
ttatmg_arr[ib][ia] = ttatmg;
satm_arr[ib][ia] = satm;
}
/* Store the band-related variables for use in the atmospheric
corrections. tgo and xrorayp are the same for each AOT, so just
save the last set for this band. */
tgo_arr[ib] = tgo;
}
/* Setup the 3rd order polynomial coefficients for the semi-empirical
approach in the aerosol inversion */
for (ib = 0; ib <= SRS_BAND12; ib++)
{
/* Determine the maximum AOT index */
iaMaxTemp = 1;
for (ia = 1; ia < NAOT_VALS; ia++)
{
if (ia == NAOT_VALS-1)
iaMaxTemp = NAOT_VALS-1;
if ((roatm_arr[ib][ia] - roatm_arr[ib][ia-1]) > ESPA_EPSILON)
continue;
else
{
iaMaxTemp = ia-1;
break;
}
}
/* Get the polynomial coefficients for roatm */
roatm_iaMax[ib] = iaMaxTemp;
get_3rd_order_poly_coeff (aot550nm, roatm_arr[ib], iaMaxTemp,
roatm_coef[ib]);
/* Get the polynomial coefficients for ttatmg */
get_3rd_order_poly_coeff (aot550nm, ttatmg_arr[ib], NAOT_VALS,
ttatmg_coef[ib]);
/* Get the polynomial coefficients for satm */
get_3rd_order_poly_coeff (aot550nm, satm_arr[ib], NAOT_VALS,
satm_coef[ib]);
}
} /* if !use_orig_aero */
/* If using the original aerosol approach we need some auxiliary data to
be interpolated for every pixel so it's available for the final aerosol
correction */
if (use_orig_aero)
{
mytime = time(NULL);
printf ("Interpolating the auxiliary data ... %s", ctime(&mytime));
#if defined(_OPENMP) && defined(USE_GCTP)
#pragma omp parallel for private (i, j, curr_pix, img, geo, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, cmg_pix11, cmg_pix12, cmg_pix21, cmg_pix22, wv11, wv12, wv21, wv22, uoz11, uoz12, uoz21, uoz22, pres11, pres12, pres21, pres22)
#elif defined(_OPENMP)
#pragma omp parallel for private (i, j, curr_pix, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, cmg_pix11, cmg_pix12, cmg_pix21, cmg_pix22, wv11, wv12, wv21, wv22, uoz11, uoz12, uoz21, uoz22, pres11, pres12, pres21, pres22)
#endif
for (i = 0; i < nlines; i++)
{
curr_pix = i * nsamps;
for (j = 0; j < nsamps; j++, curr_pix++)
{
/* If this pixel is fill, do not process. NOTE(review): the test below
reads qaband[i], but i is the line index; every other per-pixel fill
check in this routine uses qaband[curr_pix] -- verify this should be
qaband[curr_pix]. */
if (level1_qa_is_fill (qaband[i]))
continue;
/* Get the lat/long for the current pixel */
#ifdef USE_GCTP
img.l = i - 0.5;
img.s = j + 0.5;
img.is_fill = false;
if (!from_space (space, &img, &geo))
{
sprintf (errmsg, "Mapping line/sample (%d, %d) to "
"geolocation coords", i, j);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
lat = geo.lat * RAD2DEG;
lon = geo.lon * RAD2DEG;
#else
utmtodeg (&space_def, i, j, &lat, &lon);
#endif
/* Use that lat/long to determine the line/sample in the
CMG-related lookup tables, using the center of the UL
pixel. Note, we are basically making sure the line/sample
combination falls within -90, 90 and -180, 180 global climate
data boundaries. However, the source code below uses lcmg+1
and scmg+1, which for some scenes may wrap around the
dateline or the poles. Thus we need to wrap the CMG data
around to the beginning of the array. */
/* Each CMG pixel is 0.05 x 0.05 degrees. Use the center of the
pixel for each calculation. Negative latitude values should
be the largest line values in the CMG grid. Negative
longitude values should be the smallest sample values in the
CMG grid. */
/* The line/sample calculation from the x/ycmg values are not
rounded. The interpolation of the value using line+1 and
sample+1 are based on the truncated numbers, therefore
rounding up is not appropriate. */
ycmg = (89.975 - lat) * 20.0; /* vs / 0.05 */
xcmg = (179.975 + lon) * 20.0; /* vs / 0.05 */
lcmg = (int) ycmg;
scmg = (int) xcmg;
/* Handle the edges of the lat/long values in the CMG grid.
NOTE(review): the clamps below assign lcmg = CMG_NBLAT and
scmg = CMG_NBLON, i.e. one past the last valid index, and cmg_pix11..22
are then used to index the CMG arrays. The duplicate of this logic in
the aerosol-inversion loop clamps to CMG_NBLAT-1 / CMG_NBLON-1 --
confirm these should match to avoid an out-of-bounds read. */
if (lcmg < 0)
lcmg = 0;
else if (lcmg >= CMG_NBLAT)
lcmg = CMG_NBLAT;
if (scmg < 0)
scmg = 0;
else if (scmg >= CMG_NBLON)
scmg = CMG_NBLON;
/* If the current CMG pixel is at the edge of the CMG array,
then allow the next pixel for interpolation to wrap around
the array */
if (scmg >= CMG_NBLON-1) /* 180 degrees so wrap around */
scmg1 = 0;
else
scmg1 = scmg + 1;
if (lcmg >= CMG_NBLAT-1) /* -90 degrees so wrap around */
lcmg1 = 0;
else
lcmg1 = lcmg + 1;
/* Determine the four CMG pixels to be used for the current
Landsat pixel */
cmg_pix11 = lcmg * CMG_NBLON + scmg;
cmg_pix12 = lcmg * CMG_NBLON + scmg1;
cmg_pix21 = lcmg1 * CMG_NBLON + scmg;
cmg_pix22 = lcmg1 * CMG_NBLON + scmg1;
/* Get the water vapor pixels. If the water vapor value is
fill (=0), then use it as-is. */
wv11 = wv[cmg_pix11];
wv12 = wv[cmg_pix12];
wv21 = wv[cmg_pix21];
wv22 = wv[cmg_pix22];
/* Get the ozone pixels. If the ozone value is fill (=0), then
use a default value of 120. */
uoz11 = oz[cmg_pix11];
if (uoz11 == 0)
uoz11 = 120;
uoz12 = oz[cmg_pix12];
if (uoz12 == 0)
uoz12 = 120;
uoz21 = oz[cmg_pix21];
if (uoz21 == 0)
uoz21 = 120;
uoz22 = oz[cmg_pix22];
if (uoz22 == 0)
uoz22 = 120;
/* Get the surface pressure from the global DEM. Set to 1013.0
(sea level) if the DEM is fill (= -9999), which is likely
ocean. The dimensions on the DEM array is the same as that
of the CMG arrays. Use the current pixel locations already
calculated. */
if (dem[cmg_pix11] != -9999)
pres11 = 1013.0 * exp (-dem[cmg_pix11] * ONE_DIV_8500);
else
pres11 = 1013.0;
if (dem[cmg_pix12] != -9999)
pres12 = 1013.0 * exp (-dem[cmg_pix12] * ONE_DIV_8500);
else
pres12 = 1013.0;
if (dem[cmg_pix21] != -9999)
pres21 = 1013.0 * exp (-dem[cmg_pix21] * ONE_DIV_8500);
else
pres21 = 1013.0;
if (dem[cmg_pix22] != -9999)
pres22 = 1013.0 * exp (-dem[cmg_pix22] * ONE_DIV_8500);
else
pres22 = 1013.0;
/* Determine the fractional difference between the integer
location and floating point pixel location to be used for
interpolation */
u = (ycmg - lcmg);
v = (xcmg - scmg);
one_minus_u = 1.0 - u;
one_minus_v = 1.0 - v;
one_minus_u_x_one_minus_v = one_minus_u * one_minus_v;
one_minus_u_x_v = one_minus_u * v;
u_x_one_minus_v = u * one_minus_v;
u_x_v = u * v;
/* Interpolate water vapor, and unscale */
twvi[curr_pix] = wv11 * one_minus_u_x_one_minus_v +
wv12 * one_minus_u_x_v +
wv21 * u_x_one_minus_v +
wv22 * u_x_v;
twvi[curr_pix] = twvi[curr_pix] * 0.01; /* vs / 100 */
/* Interpolate ozone, and unscale */
tozi[curr_pix] = uoz11 * one_minus_u_x_one_minus_v +
uoz12 * one_minus_u_x_v +
uoz21 * u_x_one_minus_v +
uoz22 * u_x_v;
tozi[curr_pix] = tozi[curr_pix] * 0.0025; /* vs / 400 */
/* Interpolate surface pressure */
tp[curr_pix] = pres11 * one_minus_u_x_one_minus_v +
pres12 * one_minus_u_x_v +
pres21 * u_x_one_minus_v +
pres22 * u_x_v;
} /* end for j */
} /* end for i */
} /* if use_orig_aero */
/* Compute some EPS values. NOTE(review): xd below is cast to (int),
truncating (eps2*eps2 - eps3*eps3), while the parallel terms xa, xb,
and xe are kept in floating point -- confirm the truncation is
intentional. */
eps1 = LOW_EPS;
eps2 = MOD_EPS;
eps3 = HIGH_EPS;
xa = (eps1 * eps1) - (eps3 * eps3);
xd = (int) ((eps2 * eps2) - (eps3 * eps3));
xb = eps1 - eps3;
xe = eps2 - eps3;
/* Start the aerosol inversion */
mytime = time(NULL);
printf ("Aerosol Inversion using %d x %d aerosol window ... %s",
SAERO_WINDOW, SAERO_WINDOW, ctime(&mytime));
#if defined(_OPENMP) && defined(USE_GCTP)
#pragma omp parallel for private (i, j, curr_pix, img, geo, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, ratio_pix11, ratio_pix12, ratio_pix21, ratio_pix22, rb1, rb2, slpr11, slpr12, slpr21, slpr22, intr11, intr12, intr21, intr22, slprb1, slprb2, slprb7, intrb1, intrb2, intrb7, xndwi, ndwi_th1, ndwi_th2, iline, isamp, curr_win_pix, pix_count, ew_line, ew_samp, ib, iband, iband1, iaots, pres, uoz, uwv, retval, eps, residual, residual1, residual2, residual3, raot, xc, xf, coefa, coefb, epsmin, resepsmin, corf, rotoa, raot550nm, roslamb, tgo, roatm, ttatmg, satm, xrorayp, ros1, ros4, ros5, erelc, troatm)
#elif defined(_OPENMP)
#pragma omp parallel for private (i, j, curr_pix, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, ratio_pix11, ratio_pix12, ratio_pix21, ratio_pix22, rb1, rb2, slpr11, slpr12, slpr21, slpr22, intr11, intr12, intr21, intr22, slprb1, slprb2, slprb7, intrb1, intrb2, intrb7, xndwi, ndwi_th1, ndwi_th2, iline, isamp, curr_win_pix, pix_count, ew_line, ew_samp, ib, iband, iband1, iaots, pres, uoz, uwv, retval, eps, residual, residual1, residual2, residual3, raot, xc, xf, coefa, coefb, epsmin, resepsmin, corf, rotoa, raot550nm, roslamb, tgo, roatm, ttatmg, satm, xrorayp, ros1, ros4, ros5, erelc, troatm)
#endif
#ifndef _OPENMP
tmp_percent = 0;
#endif
for (i = 0; i < nlines; i+=SAERO_WINDOW)
{
#ifndef _OPENMP
/* update status, but not if multi-threaded */
curr_tmp_percent = 100 * i / nlines;
if (curr_tmp_percent > tmp_percent)
{
tmp_percent = curr_tmp_percent;
if (tmp_percent % 10 == 0)
{
printf ("%d%% ", tmp_percent);
fflush (stdout);
}
}
#endif
curr_pix = i * nsamps;
for (j = 0; j < nsamps; j+=SAERO_WINDOW, curr_pix+=SAERO_WINDOW)
{
/* If this pixel is fill */
if (level1_qa_is_fill (qaband[curr_pix]))
{
ipflag[curr_pix] = (1 << IPFLAG_FILL);
continue;
}
/* Get the lat/long for the current pixel (which may not be the
center of the aerosol window), for the center of that pixel */
#ifdef USE_GCTP
img.l = i - 0.5;
img.s = j + 0.5;
img.is_fill = false;
if (!from_space (space, &img, &geo))
{
sprintf (errmsg, "Mapping line/sample (%d, %d) to "
"geolocation coords", i, j);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
lat = geo.lat * RAD2DEG;
lon = geo.lon * RAD2DEG;
#else
utmtodeg (&space_def, i, j, &lat, &lon);
#endif
/* Use that lat/long to determine the line/sample in the
CMG-related lookup tables, using the center of the UL
pixel. Note, we are basically making sure the line/sample
combination falls within -90, 90 and -180, 180 global climate
data boundaries. However, the source code below uses lcmg+1
and scmg+1, which for some scenes may wrap around the
dateline or the poles. Thus we need to wrap the CMG data
around to the beginning of the array. */
/* Each CMG pixel is 0.05 x 0.05 degrees. Use the center of the
pixel for each calculation. Negative latitude values should
be the largest line values in the CMG grid. Negative
longitude values should be the smallest sample values in the
CMG grid. */
/* The line/sample calculation from the x/ycmg values are not
rounded. The interpolation of the value using line+1 and
sample+1 are based on the truncated numbers, therefore
rounding up is not appropriate. */
ycmg = (89.975 - lat) * 20.0; /* vs / 0.05 */
xcmg = (179.975 + lon) * 20.0; /* vs / 0.05 */
lcmg = (int) ycmg;
scmg = (int) xcmg;
/* Handle the edges of the lat/long values in the CMG grid */
if (lcmg < 0)
lcmg = 0;
else if (lcmg >= CMG_NBLAT)
lcmg = CMG_NBLAT - 1;
if (scmg < 0)
scmg = 0;
else if (scmg >= CMG_NBLON)
scmg = CMG_NBLON - 1;
/* If the current CMG pixel is at the edge of the CMG array, then
allow the next pixel for interpolation to wrap around the
array */
if (scmg >= CMG_NBLON - 1) /* 180 degrees so wrap around */
scmg1 = 0;
else
scmg1 = scmg + 1;
if (lcmg >= CMG_NBLAT - 1) /* -90 degrees, so set the next pixel
to also use -90. */
lcmg1 = lcmg;
else
lcmg1 = lcmg + 1;
/* Determine the fractional difference between the integer location
and floating point pixel location to be used for interpolation */
u = (ycmg - lcmg);
v = (xcmg - scmg);
one_minus_u = 1.0 - u;
one_minus_v = 1.0 - v;
one_minus_u_x_one_minus_v = one_minus_u * one_minus_v;
one_minus_u_x_v = one_minus_u * v;
u_x_one_minus_v = u * one_minus_v;
u_x_v = u * v;
/* Determine the band ratios and slope/intercept */
ratio_pix11 = lcmg * RATIO_NBLON + scmg;
ratio_pix12 = lcmg * RATIO_NBLON + scmg1;
ratio_pix21 = lcmg1 * RATIO_NBLON + scmg;
ratio_pix22 = lcmg1 * RATIO_NBLON + scmg1;
rb1 = ratiob1[ratio_pix11] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix11] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix11] = 0;
slpratiob2[ratio_pix11] = 0;
slpratiob7[ratio_pix11] = 0;
intratiob1[ratio_pix11] = 550;
intratiob2[ratio_pix11] = 600;
intratiob7[ratio_pix11] = 2000;
}
else if (sndwi[ratio_pix11] < 200)
{
slpratiob1[ratio_pix11] = 0;
slpratiob2[ratio_pix11] = 0;
slpratiob7[ratio_pix11] = 0;
intratiob1[ratio_pix11] = ratiob1[ratio_pix11];
intratiob2[ratio_pix11] = ratiob2[ratio_pix11];
intratiob7[ratio_pix11] = ratiob7[ratio_pix11];
}
rb1 = ratiob1[ratio_pix12] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix12] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix12] = 0;
slpratiob2[ratio_pix12] = 0;
slpratiob7[ratio_pix12] = 0;
intratiob1[ratio_pix12] = 550;
intratiob2[ratio_pix12] = 600;
intratiob7[ratio_pix12] = 2000;
}
else if (sndwi[ratio_pix12] < 200)
{
slpratiob1[ratio_pix12] = 0;
slpratiob2[ratio_pix12] = 0;
slpratiob7[ratio_pix12] = 0;
intratiob1[ratio_pix12] = ratiob1[ratio_pix12];
intratiob2[ratio_pix12] = ratiob2[ratio_pix12];
intratiob7[ratio_pix12] = ratiob7[ratio_pix12];
}
rb1 = ratiob1[ratio_pix21] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix21] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix21] = 0;
slpratiob2[ratio_pix21] = 0;
slpratiob7[ratio_pix21] = 0;
intratiob1[ratio_pix21] = 550;
intratiob2[ratio_pix21] = 600;
intratiob7[ratio_pix21] = 2000;
}
else if (sndwi[ratio_pix21] < 200)
{
slpratiob1[ratio_pix21] = 0;
slpratiob2[ratio_pix21] = 0;
slpratiob7[ratio_pix21] = 0;
intratiob1[ratio_pix21] = ratiob1[ratio_pix21];
intratiob2[ratio_pix21] = ratiob2[ratio_pix21];
intratiob7[ratio_pix21] = ratiob7[ratio_pix21];
}
rb1 = ratiob1[ratio_pix22] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix22] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix22] = 0;
slpratiob2[ratio_pix22] = 0;
slpratiob7[ratio_pix22] = 0;
intratiob1[ratio_pix22] = 550;
intratiob2[ratio_pix22] = 600;
intratiob7[ratio_pix22] = 2000;
}
else if (sndwi[ratio_pix22] < 200)
{
slpratiob1[ratio_pix22] = 0;
slpratiob2[ratio_pix22] = 0;
slpratiob7[ratio_pix22] = 0;
intratiob1[ratio_pix22] = ratiob1[ratio_pix22];
intratiob2[ratio_pix22] = ratiob2[ratio_pix22];
intratiob7[ratio_pix22] = ratiob7[ratio_pix22];
}
/* Interpolate the slope/intercept for each band, and unscale */
slpr11 = slpratiob1[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob1[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob1[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob1[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob1[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob1[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob1[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob1[ratio_pix22] * 0.001; /* vs / 1000 */
slprb1 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb1 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
slpr11 = slpratiob2[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob2[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob2[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob2[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob2[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob2[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob2[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob2[ratio_pix22] * 0.001; /* vs / 1000 */
slprb2 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb2 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
slpr11 = slpratiob7[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob7[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob7[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob7[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob7[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob7[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob7[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob7[ratio_pix22] * 0.001; /* vs / 1000 */
slprb7 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb7 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
/* Calculate NDWI variables for the band ratios */
xndwi = ((double) sband[SRS_BAND8A][curr_pix] -
(double) (sband[SRS_BAND12][curr_pix] * 0.5)) /
((double) sband[SRS_BAND8A][curr_pix] +
(double) (sband[SRS_BAND12][curr_pix] * 0.5));
ndwi_th1 = (andwi[ratio_pix11] + 2.0 *
sndwi[ratio_pix11]) * 0.001;
ndwi_th2 = (andwi[ratio_pix11] - 2.0 *
sndwi[ratio_pix11]) * 0.001;
if (xndwi > ndwi_th1)
xndwi = ndwi_th1;
if (xndwi < ndwi_th2)
xndwi = ndwi_th2;
/* Initialize the band ratios */
for (ib = 0; ib < NSRS_BANDS; ib++)
{
erelc[ib] = -1.0;
troatm[ib] = 0.0;
}
/* Compute the band ratio - coastal aerosol, blue, red, SWIR */
erelc[DNS_BAND1] = (xndwi * slprb1 + intrb1);
erelc[DNS_BAND2] = (xndwi * slprb2 + intrb2);
erelc[DNS_BAND4] = 1.0;
erelc[DNS_BAND12] = (xndwi * slprb7 + intrb7);
/* Retrieve the TOA reflectance values for the current pixel; use
a NxN average */
pix_count = 0;
ew_line = i+SAERO_WINDOW;
for (iline = i; iline < ew_line; iline++)
{
if (iline >= nlines) continue;
ew_samp = j+SAERO_WINDOW;
for (isamp = j; isamp < ew_samp; isamp++)
{
if (isamp >= nsamps) continue;
curr_win_pix = iline * nsamps + isamp;
if (level1_qa_is_fill (qaband[curr_win_pix])) continue;
troatm[DNS_BAND1] += toaband[DNS_BAND1][curr_win_pix];
troatm[DNS_BAND2] += toaband[DNS_BAND2][curr_win_pix];
troatm[DNS_BAND4] += toaband[DNS_BAND4][curr_win_pix];
troatm[DNS_BAND12] += toaband[DNS_BAND12][curr_win_pix];
pix_count++;
}
}
troatm[DNS_BAND1] /= pix_count;
troatm[DNS_BAND2] /= pix_count;
troatm[DNS_BAND4] /= pix_count;
troatm[DNS_BAND12] /= pix_count;
/* Retrieve the aerosol information for low eps 1.0 */
iband1 = DNS_BAND4; /* red band */
iaots = 0;
if (use_orig_aero)
{
pres = tp[curr_pix];
uoz = tozi[curr_pix];
uwv = twvi[curr_pix];
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps1);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps1);
/* Save the data */
residual1 = residual;
/* Retrieve the aerosol information for moderate eps 1.75 */
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps2);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps2);
/* Save the data */
residual2 = residual;
/* Retrieve the aerosol information for high eps 2.5 */
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps3);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps3);
/* Save the data */
residual3 = residual;
/* Find the eps (angstrom coefficient for AOT) that minimizes the
residual */
xc = residual1 - residual3;
xf = residual2 - residual3;
coefa = (xc*xe - xb*xf) / (xa*xe - xb*xd);
coefb = (xa*xf - xc*xd) / (xa*xe - xb*xd);
/* Local extremum. NOTE(review): epsmin is derived from the fitted
parabola coefficients coefa/coefb, but resepsmin below is evaluated
with xa/xb/xc (the residual differences), not the parabola -- confirm
the intended expression for the residual at the extremum. */
epsmin = -coefb / (2.0 * coefa);
resepsmin = xa*epsmin*epsmin + xb*epsmin + xc;
if ((epsmin < LOW_EPS) || (epsmin > HIGH_EPS))
{
if (residual1 < residual3)
epsmin = eps1;
else
epsmin = eps3;
}
else
{
if ((resepsmin > residual1) || (resepsmin > residual3))
{
if (residual1 < residual3)
epsmin = eps1;
else
epsmin = eps3;
}
}
eps = epsmin;
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts,
xtv, xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc,
troatm, tpres, rolutt, transt, xtsstep, xtsmin,
xtvstep, xtvmin, sphalbt, normext, tsmax, tsmin, nbfic,
nbfi, tts, indts, ttv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, &raot,
&residual, &iaots, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps);
teps[curr_pix] = eps;
taero[curr_pix] = raot;
corf = raot / xmus;
/* Check the model residual. Corf represents aerosol impact.
Test the quality of the aerosol inversion. */
if (residual < (0.015 + 0.005 * corf + 0.10 * troatm[DNS_BAND12]))
{
/* Test if NIR band 8a makes sense. Use a NxN window average. */
iband = DNS_BAND8A;
rotoa = 0.0;
pix_count = 0;
ew_line = i+SAERO_WINDOW;
for (iline = i; iline < ew_line; iline++)
{
if (iline >= nlines) continue;
curr_win_pix = iline * nsamps + j;
ew_samp = j+SAERO_WINDOW;
for (isamp = j; isamp < ew_samp; isamp++, curr_win_pix++)
{
if (isamp >= nsamps) continue;
rotoa += toaband[iband][curr_win_pix];
pix_count++;
}
}
rotoa /= pix_count;
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros5 = roslamb;
/* Test if red band 4 makes sense. Use a NxN window average. */
iband = DNS_BAND4;
rotoa = 0.0;
pix_count = 0;
ew_line = i+SAERO_WINDOW;
for (iline = i; iline < ew_line; iline++)
{
if (iline >= nlines) continue;
curr_win_pix = iline * nsamps + j;
ew_samp = j+SAERO_WINDOW;
for (isamp = j; isamp < ew_samp; isamp++, curr_win_pix++)
{
if (isamp >= nsamps) continue;
rotoa += toaband[iband][curr_win_pix];
pix_count++;
}
}
rotoa /= pix_count;
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros4 = roslamb;
/* Use the NDVI to validate the reflectance values or flag
as water */
if ((ros5 > 0.1) && ((ros5 - ros4) / (ros5 + ros4) > 0))
{
/* Clear pixel with valid aerosol retrieval */
ipflag[curr_pix] |= (1 << IPFLAG_VALID);
}
else
{
/* Flag as water */
ipflag[curr_pix] = (1 << IPFLAG_WATER);
}
}
else
{
/* Flag as water */
ipflag[curr_pix] = (1 << IPFLAG_WATER);
}
/* Retest any water pixels to verify they are water and obtain
their aerosol */
if (lasrc_qa_is_water(ipflag[curr_pix]))
{
/* Initialize the band ratios */
for (ib = 0; ib < NSR_BANDS; ib++)
{
erelc[ib] = -1.0;
troatm[ib] = 0.0;
}
/* Retrieve the TOA reflectance values for the current pixel;
use a NxN average */
pix_count = 0;
ew_line = i+SAERO_WINDOW;
for (iline = i; iline < ew_line; iline++)
{
if (iline >= nlines) continue;
curr_win_pix = iline * nsamps + j;
ew_samp = j+SAERO_WINDOW;
for (isamp = j; isamp < ew_samp; isamp++, curr_win_pix++)
{
if (isamp >= nsamps) continue;
if (level1_qa_is_fill (qaband[curr_win_pix])) continue;
troatm[DNS_BAND1] +=
toaband[DNS_BAND1][curr_win_pix];
troatm[DNS_BAND4] +=
toaband[DNS_BAND4][curr_win_pix];
troatm[DNS_BAND8A] +=
toaband[DNS_BAND8A][curr_win_pix];
troatm[DNS_BAND12] +=
toaband[DNS_BAND12][curr_win_pix];
pix_count++;
}
}
troatm[DNS_BAND1] /= pix_count;
troatm[DNS_BAND4] /= pix_count;
troatm[DNS_BAND8A] /= pix_count;
troatm[DNS_BAND12] /= pix_count;
/* Set the band ratio - coastal aerosol, red, NIR, SWIR */
erelc[DNS_BAND1] = 1.0;
erelc[DNS_BAND4] = 1.0;
erelc[DNS_BAND8A] = 1.0;
erelc[DNS_BAND12] = 1.0;
/* Retrieve the water aerosol information for eps 1.5 */
eps = WATER_EPS;
iaots = 0;
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, true /*water*/,
iband1, xts, xtv, xmus, xmuv, xfi, cosxfi, pres, uoz,
uwv, erelc, troatm, tpres, rolutt, transt, xtsstep,
xtsmin, xtvstep, xtvmin, sphalbt, normext, tsmax,
tsmin, nbfic, nbfi, tts, indts, ttv, tauray, ogtransa1,
ogtransb0, ogtransb1, wvtransa, wvtransb, oztransa,
&raot, &residual, &iaots, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, true /*water*/, iband1,
erelc, troatm, tgo_arr, roatm_iaMax, roatm_coef,
ttatmg_coef, satm_coef, normext_p0a3_arr, &raot,
&residual, &iaots, eps);
teps[curr_pix] = eps;
taero[curr_pix] = raot;
corf = raot / xmus;
/* Test band 1 reflectance to eliminate negative */
iband = DNS_BAND1;
rotoa = troatm[DNS_BAND1];
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros1 = roslamb;
if (residual > (0.010 + 0.005 * corf) || ros1 < 0)
{
/* Not a valid water pixel (possibly urban). Clear all
the QA bits, and mark it as IPFLAG_FAILED. */
ipflag[curr_pix] = (1 << IPFLAG_FAILED);
}
else
{
/* Valid water pixel */
ipflag[curr_pix] = (1 << IPFLAG_WATER);
ipflag[curr_pix] |= (1 << IPFLAG_VALID);
}
} /* if water pixel */
/* Fill in the remaining taero and teps values for the window,
using the current pixel. Skip fill pixels. */
for (iline = i; iline < i+SAERO_WINDOW; iline++)
{
if (iline >= nlines) continue;
curr_win_pix = iline * nsamps + j;
for (isamp = j; isamp < j+SAERO_WINDOW;
isamp++, curr_win_pix++)
{
if (isamp >= nsamps) continue;
if (level1_qa_is_fill (qaband[curr_win_pix])) continue;
teps[curr_win_pix] = teps[curr_pix];
taero[curr_win_pix] = taero[curr_pix];
}
}
} /* end for j */
} /* end for i */
/* end aerosol inversion for the NxN window */
#ifndef _OPENMP
/* update status */
printf ("100%%\n");
fflush (stdout);
#endif
/* Done with the ratiob* arrays */
free (andwi); andwi = NULL;
free (sndwi); sndwi = NULL;
free (ratiob1); ratiob1 = NULL;
free (ratiob2); ratiob2 = NULL;
free (ratiob7); ratiob7 = NULL;
free (intratiob1); intratiob1 = NULL;
free (intratiob2); intratiob2 = NULL;
free (intratiob7); intratiob7 = NULL;
free (slpratiob1); slpratiob1 = NULL;
free (slpratiob2); slpratiob2 = NULL;
free (slpratiob7); slpratiob7 = NULL;
/* Done with the DEM, water vapor, and ozone arrays */
free (dem); dem = NULL;
free (wv); wv = NULL;
free (oz); oz = NULL;
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Use the UL corner of the aerosol windows to interpolate the remaining
aerosol pixels in the window, including the UL corner of the window */
mytime = time(NULL);
printf ("Interpolating the aerosol values in the 6x6 windows %s\n",
ctime(&mytime)); fflush(stdout);
aerosol_interp_sentinel (SAERO_WINDOW, qaband, ipflag, taero, nlines,
nsamps);
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag2.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols2.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Expand the area around failed pixels to smooth aerosols in the area */
mytime = time(NULL);
printf ("Expand the failed pixels %s\n", ctime(&mytime));
ipflag_expand_failed_sentinel (ipflag, nlines, nsamps);
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag3.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols3.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Fill in the failed pixels with an average of the clear surrounding
window pixels */
mytime = time(NULL);
printf ("Averaging the failed pixels %s\n", ctime(&mytime)); fflush(stdout);
aero_avg_failed_sentinel (qaband, ipflag, taero, teps, nlines, nsamps);
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag4.img", "w");
fwrite (ipflag, nlines*nsamps, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols4.img", "w");
fwrite (taero, nlines*nsamps, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Perform the second level of atmospheric correction using the aerosols */
mytime = time(NULL);
printf ("Performing atmospheric correction ... %s\n", ctime(&mytime));
/* Loop through all the bands */
for (ib = 0; ib <= DNS_BAND12; ib++)
{
printf (" Band %s\n", SENTINEL_BANDNAME[ib]); fflush(stdout);
#ifdef PROC_ALL_BANDS
/* Special handling of band 10 if turned on */
if (ib == DNS_BAND10)
{ /* Band 10 - just use the TOA values */
printf (" -- Band 10 so just use the TOA values\n");
for (i = 0; i < npixels; i++)
sband[ib][i] = toaband[ib][i];
/* Skip to the next band */
continue;
}
#endif
/* Process the remaining bands normally */
#ifdef _OPENMP
#pragma omp parallel for private (i, rsurf, rotoa, raot550nm, eps, pres, uwv, uoz, retval, tmpf, roslamb, tgo, roatm, ttatmg, satm, xrorayp)
#endif
for (i = 0; i < npixels; i++)
{
/* If this pixel is fill, then don't process */
if (level1_qa_is_fill (qaband[i]))
continue;
/* Correct all pixels */
rotoa = toaband[ib][i];
raot550nm = taero[i];
eps = teps[i];
if (use_orig_aero)
{
pres = tp[i];
uwv = twvi[i];
uoz = tozi[i];
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv,
xfi, cosxfi, raot550nm, ib, pres, tpres, aot550nm,
rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin, sphalbt,
normext, tsmax, tsmin, nbfic, nbfi, tts, indts, ttv, uoz,
uwv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, rotoa, &roslamb, &tgo, &roatm, &ttatmg,
&satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[ib],
aot550nm[roatm_iaMax[ib]], &roatm_coef[ib][0],
&ttatmg_coef[ib][0], &satm_coef[ib][0], raot550nm, ib,
normext_p0a3_arr[ib], rotoa, &roslamb, eps);
/* If this is the coastal aerosol band then set the aerosol
bits in the QA band */
if (ib == DNS_BAND1)
{
/* Set up aerosol QA bits */
rsurf = sband[ib][i];
tmpf = fabs (rsurf - roslamb);
if (tmpf <= LOW_AERO_THRESH)
{ /* Set first aerosol bit (low aerosols) */
ipflag[i] |= (1 << AERO1_QA);
}
else
{
if (tmpf < AVG_AERO_THRESH)
{ /* Set second aerosol bit (average aerosols) */
ipflag[i] |= (1 << AERO2_QA);
}
else
{ /* Set both aerosol bits (high aerosols) */
ipflag[i] |= (1 << AERO1_QA);
ipflag[i] |= (1 << AERO2_QA);
}
}
} /* end if this is the coastal aerosol band */
/* Save the unscaled surface reflectance value */
if (roslamb < MIN_VALID_REFL)
sband[ib][i] = MIN_VALID_REFL;
else if (roslamb > MAX_VALID_REFL)
sband[ib][i] = MAX_VALID_REFL;
else
sband[ib][i] = roslamb;
} /* end for i */
} /* end for ib */
/* Free memory for arrays no longer needed */
if (use_orig_aero)
{
free (twvi);
free (tozi);
free (tp);
}
free (taero);
free (teps);
/* Write the data to the output file */
mytime = time(NULL);
printf ("Writing surface reflectance corrected data to the output "
"files ... %s", ctime(&mytime)); fflush(stdout);
/* Open the output file */
sr_output = open_output (xml_metadata, input, OUTPUT_SR);
if (sr_output == NULL)
{ /* error message already printed */
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Loop through the reflectance bands and write the data */
for (ib = 0; ib <= DNS_BAND12; ib++)
{
/* Scale the output data from float to int16 */
printf (" Band %s: %s\n", SENTINEL_BANDNAME[ib],
sr_output->metadata.band[ib].file_name);
convert_output (sband, ib, nlines, nsamps, false, out_band);
/* Write the scaled product */
if (put_output_lines (sr_output, out_band, ib, 0, nlines,
sizeof (uint16)) != SUCCESS)
{
sprintf (errmsg, "Writing output data for band %d", ib);
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Create the ENVI header file this band */
if (create_envi_struct (&sr_output->metadata.band[ib],
&xml_metadata->global, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Creating ENVI header structure.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the ENVI header */
strcpy (envi_file, sr_output->metadata.band[ib].file_name);
cptr = strchr (envi_file, '.');
strcpy (cptr, ".hdr");
if (write_envi_hdr (envi_file, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Writing ENVI header file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
}
/* Append the surface reflectance bands to the XML file */
if (append_metadata (NREFLS_BANDS, sr_output->metadata.band, xml_infile)
!= SUCCESS)
{
sprintf (errmsg, "Appending surface reflectance bands to the "
"XML file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the aerosol QA band */
printf (" Aerosol Band %d: %s\n", SRS_AEROSOL+1,
sr_output->metadata.band[SRS_AEROSOL].file_name);
if (put_output_lines (sr_output, ipflag, SRS_AEROSOL, 0, nlines,
sizeof (uint8)) != SUCCESS)
{
sprintf (errmsg, "Writing aerosol QA output data");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Free memory for ipflag data */
free (ipflag);
/* Create the ENVI header for the aerosol QA band */
if (create_envi_struct (&sr_output->metadata.band[SRS_AEROSOL],
&xml_metadata->global, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Creating ENVI header structure.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the ENVI header */
strcpy (envi_file, sr_output->metadata.band[SRS_AEROSOL].file_name);
cptr = strchr (envi_file, '.');
strcpy (cptr, ".hdr");
if (write_envi_hdr (envi_file, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Writing ENVI header file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Append the aerosol QA band to the XML file */
if (append_metadata (1, &sr_output->metadata.band[SRS_AEROSOL],
xml_infile) != SUCCESS)
{
sprintf (errmsg, "Appending aerosol QA band to XML file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Close the output surface reflectance products */
close_output (sat, sr_output, OUTPUT_SR);
free_output (sr_output, OUTPUT_SR);
/* Free the spatial mapping pointer */
free (space);
/* Free the data arrays */
free (rolutt);
free (transt);
free (sphalbt);
free (normext);
free (tsmax);
free (tsmin);
free (nbfic);
free (nbfi);
free (ttv);
/* Successful completion */
mytime = time(NULL);
printf ("Surface reflectance correction complete ... %s\n", ctime(&mytime));
return (SUCCESS);
}
|
hillclimb.c | #define _POSIX_C_SOURCE 200112L
#define WIN32_LEAN_AND_MEAN
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define HASHN 3 // number of multiplies in hash
#define SHIFT_RANGE 1 // radius of shift search
#define CONST_RANGE 2 // radius of const search
#define QUALITY 18 // 2^N iterations of estimate samples
#define THRESHOLD 1.95 // regenerate anything lower than this estimate
static int optind = 1;
static int opterr = 1;
static int optopt;
static char *optarg;
/* Minimal getopt() replacement so the program builds on platforms
 * without <unistd.h> (e.g. MSVC). Supports single-character options,
 * ':'-suffixed options taking an argument (attached or as the next
 * argv element), and "--" to end option parsing. State lives in the
 * file-scope optind/opterr/optopt/optarg plus a static cluster
 * position. NOTE(review): arg[1] is passed to isalnum() as a plain
 * char; negative chars would be UB — assumes ASCII option letters. */
static int
getopt(int argc, char * const argv[], const char *optstring)
{
    static int optpos = 1;   /* position inside a cluster like "-qs" */
    const char *arg;
    (void)argc;
    /* Reset? (caller may set optind = 0 to restart a scan) */
    if (optind == 0) {
        optind = 1;
        optpos = 1;
    }
    arg = argv[optind];
    if (arg && strcmp(arg, "--") == 0) {
        /* "--" explicitly terminates option parsing */
        optind++;
        return -1;
    } else if (!arg || arg[0] != '-' || !isalnum(arg[1])) {
        /* end of argv, or not an option argument */
        return -1;
    } else {
        const char *opt = strchr(optstring, arg[optpos]);
        optopt = arg[optpos];
        if (!opt) {
            /* unknown option: report unless optstring starts with ':' */
            if (opterr && *optstring != ':')
                fprintf(stderr, "%s: illegal option: %c\n", argv[0], optopt);
            return '?';
        } else if (opt[1] == ':') {
            /* option takes an argument */
            if (arg[optpos + 1]) {
                /* attached form: "-pARG" */
                optarg = (char *)arg + optpos + 1;
                optind++;
                optpos = 1;
                return optopt;
            } else if (argv[optind + 1]) {
                /* separate form: "-p ARG" */
                optarg = (char *)argv[optind + 1];
                optind += 2;
                optpos = 1;
                return optopt;
            } else {
                /* argument missing: ':' if optstring starts with ':' */
                if (opterr && *optstring != ':')
                    fprintf(stderr,
                            "%s: option requires an argument: %c\n",
                            argv[0], optopt);
                return *optstring == ':' ? ':' : '?';
            }
        } else {
            /* flag option: advance within the cluster */
            if (!arg[++optpos]) {
                optind++;
                optpos = 1;
            }
            return optopt;
        }
    }
}
#if defined(__unix__)
#include <sys/time.h>
/* Microseconds since the Unix epoch; used only as seed entropy. */
uint64_t
uepoch(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (uint64_t)now.tv_sec * 1000000 + (uint64_t)now.tv_usec;
}
#elif defined(_WIN32)
#include <windows.h>
/* Microseconds since the Unix epoch, Windows variant. FILETIME counts
 * 100ns ticks since 1601-01-01; divide by 10 for microseconds, then
 * subtract the 1601->1970 offset (11644473600 seconds in µs). */
uint64_t
uepoch(void)
{
    FILETIME ft;
    GetSystemTimeAsFileTime(&ft);
    uint64_t tt = ft.dwHighDateTime;
    tt <<= 32;
    tt |= ft.dwLowDateTime;
    tt /=10;
    tt -= UINT64_C(11644473600000000);
    return tt;
}
#endif
static uint64_t
rand64(uint64_t s[4])
{
uint64_t x = s[1] * 5;
uint64_t r = ((x << 7) | (x >> 57)) * 9;
uint64_t t = s[1] << 17;
s[2] ^= s[0];
s[3] ^= s[1];
s[1] ^= s[2];
s[0] ^= s[3];
s[2] ^= t;
s[3] = (s[3] << 45) | (s[3] >> 19);
return r;
}
/* A candidate 32-bit hash: HASHN rounds of (x ^= x >> s[i]; x *= c[i])
 * followed by a final x ^= x >> s[HASHN]. */
struct hash {
    uint32_t c[HASHN];   /* odd multiplicative constants */
    char s[HASHN + 1];   /* xorshift amounts (valid range 1..31) */
};
/* Fill `h` with a fresh candidate: random odd 32-bit multipliers
 * (low bit forced to 1) and every shift set to the neutral 16. */
static void
hash_gen(struct hash *h, uint64_t rng[4])
{
    int i;
    for (i = 0; i < HASHN; i++) {
        h->c[i] = (uint32_t)(rand64(rng) >> 32) | UINT32_C(1);
        h->s[i] = 16;
    }
    h->s[HASHN] = 16;
}
/* Return 1 iff both hash descriptions are identical stage by stage. */
static int
hash_equal(const struct hash *a, const struct hash *b)
{
    int i;
    for (i = 0; i < HASHN; i++)
        if (a->c[i] != b->c[i] || a->s[i] != b->s[i])
            return 0;
    return a->s[HASHN] == b->s[HASHN];
}
/* Print `h` as "[s0 c0 s1 c1 ... sN]" — the format hash_parse()
 * accepts — and flush so progress is visible immediately. */
static void
hash_print(const struct hash *h)
{
    int i;
    fputc('[', stdout);
    for (i = 0; i < HASHN; i++)
        fprintf(stdout, "%2d %08lx ", h->s[i], (unsigned long)h->c[i]);
    fprintf(stdout, "%2d]", h->s[HASHN]);
    fflush(stdout);
}
/* Parse a pattern of the form "[s c s c s c s]" as produced by
 * hash_print(). Returns 1 on success, 0 on malformed input.
 * NOTE: uses strtok(), so `str` is modified in place and parsing is
 * not reentrant.
 * Fix: every strtok() result is now checked for NULL before use —
 * previously a truncated pattern (too few tokens) passed NULL to
 * strtol()/strtoul(), which is undefined behavior. */
static int
hash_parse(struct hash *h, char *str)
{
    long s;
    unsigned long c;
    char *end, *tok;
    if (*str != '[')
        return 0;
    str++;
    for (int i = 0; i < HASHN; i++) {
        /* shift amount: decimal, 1..31 */
        tok = strtok(i ? 0 : str, " ");
        if (!tok)
            return 0;
        s = strtol(tok, &end, 10);
        if (s < 1 || s > 31 || !(*end == 0 || *end == ' '))
            return 0;
        h->s[i] = s;
        /* multiplier: hexadecimal, must fit in 32 bits */
        tok = strtok(0, " ");
        if (!tok)
            return 0;
        c = strtoul(tok, &end, 16);
        if (c > 0xffffffffUL || !(*end == 0 || *end == ' '))
            return 0;
        h->c[i] = c;
    }
    /* final shift, terminated by ']' */
    tok = strtok(0, "]");
    if (!tok)
        return 0;
    s = strtol(tok, &end, 10);
    if (s < 1 || s > 31 || *end)
        return 0;
    h->s[HASHN] = s;
    return 1;
}
/* Apply the candidate hash `h` to `x`: HASHN xorshift-multiply
 * stages followed by one final xorshift. */
static uint32_t
hash(const struct hash *h, uint32_t x)
{
    int i;
    for (i = 0; i < HASHN; i++) {
        x ^= x >> h->s[i];
        x *= h->c[i];
    }
    return x ^ (x >> h->s[HASHN]);
}
/* Estimate the avalanche bias of `f` by Monte Carlo sampling: for
 * 2^QUALITY random inputs, flip each of the 32 input bits and count
 * how often each of the 32 output bits flips. An ideal hash flips
 * each output bit with probability 1/2; the return value is the RMS
 * deviation from that ideal, scaled by 1000 (lower is better). */
static double
estimate_bias32(const struct hash *f, uint64_t rng[4])
{
    long n = 1L << QUALITY;
    long bins[32][32] = {{0}};
    for (long i = 0; i < n; i++) {
        uint32_t x = rand64(rng);           /* truncated to low 32 bits */
        uint32_t h0 = hash(f, x);
        for (int j = 0; j < 32; j++) {
            uint32_t bit = UINT32_C(1) << j;
            uint32_t h1 = hash(f, x ^ bit); /* input bit j flipped */
            uint32_t set = h0 ^ h1;         /* output bits that changed */
            for (int k = 0; k < 32; k++)
                bins[j][k] += (set >> k) & 1;
        }
    }
    /* RMS deviation of each (input bit, output bit) pair from the
     * ideal 50% flip rate. */
    double mean = 0;
    for (int j = 0; j < 32; j++) {
        for (int k = 0; k < 32; k++) {
            double diff = (bins[j][k] - n / 2) / (n / 2.0);
            mean += (diff * diff) / (32 * 32);
        }
    }
    return sqrt(mean) * 1000.0;
}
#define EXACT_SPLIT 32 // must be power of two
/* Compute the exact avalanche bias of `f` by exhausting all 2^32
 * inputs — the same statistic as estimate_bias32() but exact. The
 * input space is split into EXACT_SPLIT chunks for OpenMP; each
 * thread tallies into a private table merged under the critical
 * section. Expensive: 33 * 2^32 hash evaluations per call. */
static double
exact_bias32(const struct hash *f)
{
    int i; // declare here to work around Visual Studio issue
    long long bins[32][32] = {{0}};
    static const uint64_t range = (UINT64_C(1) << 32) / EXACT_SPLIT;
#pragma omp parallel for
    for (i = 0; i < EXACT_SPLIT; i++) {
        long long b[32][32] = {{0}};   /* per-thread tally */
        for (uint64_t x = i * range; x < (i + 1) * range; x++) {
            uint32_t h0 = hash(f, x);
            for (int j = 0; j < 32; j++) {
                uint32_t bit = UINT32_C(1) << j;
                uint32_t h1 = hash(f, x ^ bit);
                uint32_t set = h0 ^ h1;
                for (int k = 0; k < 32; k++)
                    b[j][k] += (set >> k) & 1;
            }
        }
#pragma omp critical
        for (int j = 0; j < 32; j++)
            for (int k = 0; k < 32; k++)
                bins[j][k] += b[j][k];
    }
    /* 2147483648 = 2^31 = expected flip count for an unbiased bit. */
    double mean = 0.0;
    for (int j = 0; j < 32; j++) {
        for (int k = 0; k < 32; k++) {
            double diff = (bins[j][k] - 2147483648L) / 2147483648.0;
            mean += (diff * diff) / (32 * 32);
        }
    }
    return sqrt(mean) * 1000.0;
}
/* Generate candidates until the sampled bias estimate is no worse
 * than THRESHOLD, so climbs never start from a hopeless hash. */
static void
hash_gen_strict(struct hash *h, uint64_t rng[4])
{
    for (;;) {
        hash_gen(h, rng);
        if (estimate_bias32(h, rng) <= THRESHOLD)
            return;
    }
}
/* Load a 64-bit little-endian value from an unaligned buffer. */
static uint64_t
load64(const void *buf)
{
    const unsigned char *p = buf;
    uint64_t v = 0;
    for (int i = 7; i >= 0; i--)
        v = (v << 8) | p[i];
    return v;
}
/* Combine two 64-bit words into one via a multiply-subtract followed
 * by an xorshift finalizer. */
static uint64_t
mix64(uint64_t x, uint64_t y)
{
    uint64_t m = x * UINT64_C(0x2b8a130976726633)
               - y * UINT64_C(0xb28cbd28446adb17);
    return m ^ (m >> 32);
}
/* Multiply by `m` and fold the high half down with an xorshift. */
static uint64_t
hash64(uint64_t x, uint64_t m)
{
    uint64_t v = x * m;
    return v ^ (v >> 32);
}
/* Mix the four 64-bit seed words of `x` into a well-distributed PRNG
 * state. Each ROUND64 hashes one word and folds it into the other
 * three; the `m += i` side effects advance the multiplier between
 * every hash64() call, so the exact statement order is significant. */
static void
mix64x4(uint64_t x[4])
{
    uint64_t i = 0xf81db9ba6dabee4e;   /* multiplier increment */
    uint64_t m = 0xb1d9e3fbc08321db;   /* running multiplier */
    x[0] = hash64(x[0] + UINT64_C(0x347534cdcf0982b6), m);
    x[1] = hash64(x[1] + UINT64_C(0x975e2ee8f0f23aa8), m += i);
    x[2] = hash64(x[2] + UINT64_C(0x7baf736c6c769a0b), m += i);
    x[3] = hash64(x[3] + UINT64_C(0x884afc96accb90d9), m += i);
#define ROUND64(a, b, c, d) \
    x[b] = mix64(hash64(x[a], m += i), x[b]); \
    x[c] = mix64(hash64(x[a], m += i), x[c]); \
    x[d] = mix64(hash64(x[a], m += i), x[d])
    ROUND64(0, 1, 2, 3);
    ROUND64(1, 0, 2, 3);
    ROUND64(2, 0, 1, 3);
    /* NOTE(review): last round repeats x[3] as both source and a
     * destination instead of the expected (3, 0, 1, 2) — possibly
     * intentional, possibly a typo; confirm against upstream. */
    ROUND64(3, 0, 1, 3);
#undef ROUND64
}
/* Seed the PRNG state from environmental entropy: the current time
 * plus the addresses of this function, the state array, and a
 * transient heap allocation (ASLR varies these per run), then mix
 * thoroughly with mix64x4().
 * Fix: pointer-to-integer conversions now go through uintptr_t, the
 * portable conversion path, instead of casting straight to uint64_t
 * (a direct cast can warn or misbehave on unusual ABIs; function
 * pointer -> uintptr_t is a common extension — confirm on target). */
static void
rng_init(uint64_t rng[4])
{
    void *p = malloc(1024L * 1024);
    rng[0] = uepoch();
    rng[1] = (uint64_t)(uintptr_t)rng_init;
    rng[2] = (uint64_t)(uintptr_t)rng;
    rng[3] = (uint64_t)(uintptr_t)p;
    free(p);
    mix64x4(rng);
}
/* Modular multiplicative inverse of an odd x modulo 2^32 via Newton
 * iteration: y <- y * (2 - x*y) doubles the number of correct low
 * bits each step; an odd x seeds 3 correct bits, so 5 steps cover
 * well past 32 bits. Result is meaningful only for odd x. */
static uint32_t
modinv32(uint32_t x)
{
    uint32_t y = x;
    for (int i = 0; i < 5; i++)
        y *= 2 - x * y;
    return y;
}
/* Print command-line usage to stream `f` (stdout for -h, stderr on
 * error).
 * Fix: corrected user-visible typos in the help text — "an quit" ->
 * "and quit", "a local minima" -> "a local minimum". */
static void
usage(FILE *f)
{
    fprintf(f, "usage: hillclimb [-EhIqs] [-p INIT] [-x SEED]\n");
    fprintf(f, " -E Evaluate given pattern (-p)\n");
    fprintf(f, " -h Print this message and exit\n");
    fprintf(f, " -I Invert given pattern (-p) and quit\n");
    fprintf(f, " -p INIT Provide an initial hash function\n");
    fprintf(f, " -q Print less information (quiet)\n");
    fprintf(f, " -s Quit after finding a local minimum\n");
    fprintf(f, " -x SEED Seed PRNG from a string (up to 32 bytes)\n");
}
/* Entry point: parse options, then either invert a hash (-I), exactly
 * evaluate one (-E), or run the hill-climbing search: repeatedly
 * probe neighboring shift/constant tweaks of the current hash, move
 * to the best strictly-improving neighbor, and reset (or stop with
 * -s) at a local minimum of the exact avalanche bias. */
int
main(int argc, char **argv)
{
    int seeded = 0;              /* -x provided an explicit seed */
    uint64_t rng[4];
    struct hash cur, last = {0}; /* last = previous point, to skip re-eval */
    int generate = 1;            /* no -p: start from a random hash */
    int one_shot = 0;            /* -s: stop at first local minimum */
    int quiet = 0;               /* -q (repeatable) */
    int invert = 0;              /* -I */
    int evaluate = 0;            /* -E */
    double cur_score = -1;       /* <0 means "not yet computed" */
    int option;
    while ((option = getopt(argc, argv, "EhIp:qsx:")) != -1) {
        switch (option) {
        case 'E': {
            evaluate = 1;
        } break;
        case 'h': {
            usage(stdout);
            exit(EXIT_SUCCESS);
        } break;
        case 'I': {
            invert = 1;
        } break;
        case 'p': {
            if (!hash_parse(&cur, optarg)) {
                fprintf(stderr, "hillclimb: invalid pattern: %s\n", optarg);
                exit(EXIT_FAILURE);
            }
            generate = 0;
        } break;
        case 'q': {
            quiet++;
        } break;
        case 's': {
            one_shot = 1;
        } break;
        case 'x': {
            /* Derive the PRNG state from up to 32 seed bytes
             * (zero-padded), then mix. */
            unsigned char buf[32] = {0};
            size_t len = strlen(optarg);
            if (len > sizeof(buf)) {
                fprintf(stderr, "hillclimb: seed too long (> 32 bytes)\n");
                exit(EXIT_FAILURE);
            }
            memcpy(buf, optarg, len);
            rng[0] = load64(buf + 0);
            rng[1] = load64(buf + 8);
            rng[2] = load64(buf + 16);
            rng[3] = load64(buf + 24);
            mix64x4(rng);
            seeded = 1;
        } break;
        default:
            usage(stderr);
            exit(EXIT_FAILURE);
        }
    }
    if (invert) {
        /* Emit C source for the inverse function: reverse the stage
         * order, expand each xorshift inverse, and use the modular
         * inverse of each multiplier. */
        if (generate) {
            fprintf(stderr, "hillclimb: -I requires -p\n");
            exit(EXIT_FAILURE);
        }
        printf("uint32_t\nhash_r(uint32_t x)\n{\n");
        for (int i = 0; i < HASHN * 2 + 1; i++) {
            switch (i & 1) {
            case 0: {
                int s = HASHN - i / 2;
                printf(" x ^=");
                /* xorshift inverse: xor in all repeated shifts.
                 * NOTE(review): inner `i` shadows the outer loop
                 * counter — works, but easy to misread. */
                for (int i = cur.s[s]; i < 32; i += cur.s[s])
                    printf(" %sx >> %d", i == cur.s[s] ? "" : "^ ", i);
                printf(";\n");
            } break;
            case 1: {
                int c = HASHN - (i + 1) / 2;
                unsigned long inv = modinv32(cur.c[c]);
                printf(" x *= UINT32_C(0x%08lx);\n", inv);
            } break;
            }
        }
        printf(" return x;\n}\n");
        exit(EXIT_SUCCESS);
    }
    if (evaluate) {
        /* Just print the exact bias of the given pattern. */
        if (generate) {
            fprintf(stderr, "hillclimb: -E requires -p\n");
            exit(EXIT_FAILURE);
        }
        hash_print(&cur);
        printf(" = %.17g\n", exact_bias32(&cur));
        exit(EXIT_SUCCESS);
    }
    if (!seeded)
        rng_init(rng);
    if (generate)
        hash_gen_strict(&cur, rng);
    for (;;) {
        int found = 0;              /* any improving neighbor found? */
        struct hash best;
        double best_score;
        if (quiet < 2)
            hash_print(&cur);
        if (cur_score < 0)
            cur_score = exact_bias32(&cur);
        if (quiet < 2)
            printf(" = %.17g\n", cur_score);
        best = cur;
        best_score = cur_score;
        /* Explore around shifts */
        for (int i = 0; i <= HASHN; i++) {
            /* In theory the shift could drift above 31 or below 1, but
             * in practice it would never get this far since these would
             * be terrible hashes.
             */
            for (int d = -SHIFT_RANGE; d <= +SHIFT_RANGE; d++) {
                if (d == 0) continue;
                struct hash tmp = cur;
                tmp.s[i] += d;
                /* skip the point we just came from */
                if (hash_equal(&tmp, &last)) continue;
                if (quiet <= 0) {
                    printf(" ");
                    hash_print(&tmp);
                }
                double score = exact_bias32(&tmp);
                if (quiet <= 0)
                    printf(" = %.17g\n", score);
                if (score < best_score) {
                    best_score = score;
                    best = tmp;
                    found = 1;
                }
            }
        }
        /* Explore around constants (step 2 keeps them odd) */
        for (int i = 0; i < HASHN; i++) {
            for (int d = -CONST_RANGE; d <= +CONST_RANGE; d += 2) {
                if (d == 0) continue;
                struct hash tmp = cur;
                tmp.c[i] += d;
                if (hash_equal(&tmp, &last)) continue;
                if (quiet <= 0) {
                    printf(" ");
                    hash_print(&tmp);
                }
                double score = exact_bias32(&tmp);
                if (quiet <= 0)
                    printf(" = %.17g\n", score);
                if (score < best_score) {
                    best_score = score;
                    best = tmp;
                    found = 1;
                }
            }
        }
        if (found) {
            /* Move to the lowest item found */
            if (quiet < 1)
                puts("CLIMB");
            last = cur;
            cur = best;
            cur_score = best_score;
        } else if (one_shot) {
            /* Hit local minima, exit */
            if (quiet < 1)
                puts("DONE");
            hash_print(&cur);
            printf(" = %.17g\n", cur_score);
            break;
        } else {
            /* Hit local minima, reset */
            if (quiet < 1)
                puts("RESET");
            hash_print(&cur);
            printf(" = %.17g\n", cur_score);
            last.s[0] = 0; // set to invalid
            hash_gen_strict(&cur, rng);
            cur_score = -1;
        }
    }
}
|
full_buffer.c | #include "correctness-checking-partitioned-impl.h"
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define TOTAL_SIZE 4000
#define ITERATIONS 10
#define TAG 42
//buffer:
// RECV SEND LOCAL SEND RECV
// TOTAL_SIZE must be at least 4 times STENCIL_SIZE
// Print a pair of longs, comma-separated, for debugging.
void debug_function(long a, long b) {
    fprintf(stdout, " %ld,%ld\n", a, b);
}
/* Ring exchange over MPI_COMM_WORLD: every rank fills a buffer (in an
 * OpenMP-parallel loop), posts a non-blocking receive from its
 * predecessor, sends to its successor, then waits. Irecv-before-Send
 * avoids the classic all-blocking-send deadlock. */
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    //int pre = (rank == 0) ? -1 : rank - 1;
    //int nxt = (rank == size - 1) ? -1 : rank + 1;
    /* Ring neighbors with wrap-around; C's % can be negative for
     * rank 0, corrected just below. */
    int nxt = (rank + 1) % size;
    int pre = (rank - 1) % size;
    pre = pre<0?size+pre:pre;// if % is negative: "start counting backwards at size"
    //printf("Rank %i in comm with %d and %d\n", rank, pre,nxt);
    int *buffer = (int*) malloc(sizeof(int) * TOTAL_SIZE);
    int *buffer_r = (int*) malloc(sizeof(int) * TOTAL_SIZE);
    // buffer access
    /* Static 1000-element chunks pin which thread initializes which
     * region of the buffer. */
    #pragma omp parallel for firstprivate(buffer) schedule(static,1000)
    //#pragma omp parallel for firstprivate(buffer) schedule(dynamic,1000)
    //#pragma omp parallel for
    for (int i = 0; i < TOTAL_SIZE; ++i) {
        buffer[i] = i * rank;
    }
    // communication
    // no deadlock
    MPI_Request req;
    printf("Rank %d recv from %d send to %d\n",rank,pre,nxt);
    MPI_Irecv(buffer_r,
              TOTAL_SIZE, MPI_INT, pre, TAG,
              MPI_COMM_WORLD, &req);
    MPI_Send(buffer, TOTAL_SIZE, MPI_INT, nxt, TAG,
             MPI_COMM_WORLD);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    free(buffer);
    free(buffer_r);
    MPI_Finalize();
}
|
reduction_min_max.c | // Contributed by Franke Ye @gatech.edu
#include <stdio.h>
/* Demonstrates OpenMP min/max reduction clauses.
 * Fix: the array `a` and the accumulators `mn`/`mx` were read while
 * uninitialized — undefined behavior. With a reduction clause the
 * variable's pre-loop value is combined into the final result, so
 * both accumulators must be seeded with a defined value before the
 * parallel loop. */
int main()
{
    int a[10];
    int i, mn, mx;
    /* Give the array defined contents before reducing over it. */
    for (i = 0; i < 10; i++)
        a[i] = (3 * i) % 7;
    /* Seed with a real element so the initial values folded into the
     * reductions are well-defined. */
    mn = a[0];
    mx = a[0];
#pragma omp parallel for reduction(min:mn)
    for(i=0 ; i<10 ; i++ )
        mn = mn < a[i] ? mn : a[i];
#pragma omp parallel for reduction(max:mx)
    for(i=0 ; i<10 ; i++ )
        mx = mx > a[i] ? mx : a[i];
    return 0;
}
|
displacement_lagrangemultiplier_residual_frictional_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_FRICTIONAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_FRICTIONAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "custom_strategies/custom_convergencecriterias/base_mortar_criteria.h"
#include "utilities/color_utilities.h"
#include "custom_utilities/active_set_utilities.h"
#include "utilities/constraint_utilities.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierResidualFrictionalContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems (only for frictional cases)
* This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierResidualFrictionalContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierResidualFrictionalContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualFrictionalContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_NORMAL_RESIDUAL_IS_SET );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_STICK_RESIDUAL_IS_SET );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_SLIP_RESIDUAL_IS_SET );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The r_table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
/// Zero tolerance definition
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
* @param DispRatioTolerance Relative tolerance for displacement residual error
* @param DispAbsTolerance Absolute tolerance for displacement residual error
* @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
* @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
* @param EnsureContact To check if the contact is lost
* @param NormalTangentRatio Ratio between the normal and tangent that will accepted as converged
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
    // Explicit-tolerance constructor: stores the residual tolerances for
    // displacement, normal LM, and stick/slip tangent LM components, and
    // initializes the convergence bookkeeping flags.
    explicit DisplacementLagrangeMultiplierResidualFrictionalContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType LMNormalRatioTolerance,
        const TDataType LMNormalAbsTolerance,
        const TDataType LMTangentStickRatioTolerance,
        const TDataType LMTangentStickAbsTolerance,
        const TDataType LMTangentSlipRatioTolerance,
        const TDataType LMTangentSlipAbsTolerance,
        const TDataType NormalTangentRatio,
        const bool EnsureContact = false,
        const bool PureSlip = false,
        const bool PrintingOutput = false
        ) : BaseType()
    {
        // Set local flags (all INITIAL_*_IS_SET flags start false; they are
        // raised the first time the corresponding residual norm is computed)
        mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP, PureSlip);
        mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false);
        // The displacement residual
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;
        // The normal contact residual
        mLMNormalRatioTolerance = LMNormalRatioTolerance;
        mLMNormalAbsTolerance = LMNormalAbsTolerance;
        // The tangent contact residual (separate stick and slip tolerances)
        mLMTangentStickRatioTolerance = LMTangentStickRatioTolerance;
        mLMTangentStickAbsTolerance = LMTangentStickAbsTolerance;
        mLMTangentSlipRatioTolerance = LMTangentSlipRatioTolerance;
        mLMTangentSlipAbsTolerance = LMTangentSlipAbsTolerance;
        // We get the ratio between the normal and tangent that will accepted as converged
        mNormalTangentRatio = NormalTangentRatio;
    }
/**
 * @brief Default constructor (parameters)
 * @param ThisParameters The configuration parameters
 */
explicit DisplacementLagrangeMultiplierResidualFrictionalContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
: BaseType()
{
// The default parameters
// NOTE: "ratio_normal_tangent_threshold" is part of the defaults so that reading it
// below cannot throw after ValidateAndAssignDefaults
Parameters default_parameters = Parameters(R"(
{
"ensure_contact" : false,
"pure_slip" : false,
"print_convergence_criterion" : false,
"residual_relative_tolerance" : 1.0e-4,
"residual_absolute_tolerance" : 1.0e-9,
"contact_residual_relative_tolerance" : 1.0e-4,
"contact_residual_absolute_tolerance" : 1.0e-9,
"frictional_stick_contact_residual_relative_tolerance" : 1.0e-4,
"frictional_stick_contact_residual_absolute_tolerance" : 1.0e-9,
"frictional_slip_contact_residual_relative_tolerance" : 1.0e-4,
"frictional_slip_contact_residual_absolute_tolerance" : 1.0e-9,
"ratio_normal_tangent_threshold" : 1.0e-4
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
// The displacement residual
mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();
// The normal contact residual
// FIX: the ratio tolerance must be read from the relative-tolerance key (the previous
// key "contact_displacement_absolute_tolerance" does not exist in the defaults)
mLMNormalRatioTolerance = ThisParameters["contact_residual_relative_tolerance"].GetDouble();
mLMNormalAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble();
// The tangent contact residual
mLMTangentStickRatioTolerance = ThisParameters["frictional_stick_contact_residual_relative_tolerance"].GetDouble();
mLMTangentStickAbsTolerance = ThisParameters["frictional_stick_contact_residual_absolute_tolerance"].GetDouble();
mLMTangentSlipRatioTolerance = ThisParameters["frictional_slip_contact_residual_relative_tolerance"].GetDouble();
mLMTangentSlipAbsTolerance = ThisParameters["frictional_slip_contact_residual_absolute_tolerance"].GetDouble();
// We get the ratio between the normal and tangent that will accepted as converged
mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble();
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false);
}
// Copy constructor.
// FIX: the previous version omitted the slip reference/current norms and the active
// DoFs vector, leaving those members uninitialized/empty in the copy.
DisplacementLagrangeMultiplierResidualFrictionalContactCriteria( DisplacementLagrangeMultiplierResidualFrictionalContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance)
,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance)
,mLMNormalInitialResidualNorm(rOther.mLMNormalInitialResidualNorm)
,mLMNormalCurrentResidualNorm(rOther.mLMNormalCurrentResidualNorm)
,mLMTangentStickRatioTolerance(rOther.mLMTangentStickRatioTolerance)
,mLMTangentStickAbsTolerance(rOther.mLMTangentStickAbsTolerance)
,mLMTangentSlipRatioTolerance(rOther.mLMTangentSlipRatioTolerance)
,mLMTangentSlipAbsTolerance(rOther.mLMTangentSlipAbsTolerance)
,mLMTangentStickInitialResidualNorm(rOther.mLMTangentStickInitialResidualNorm)
,mLMTangentStickCurrentResidualNorm(rOther.mLMTangentStickCurrentResidualNorm)
,mLMTangentSlipInitialResidualNorm(rOther.mLMTangentSlipInitialResidualNorm)
,mLMTangentSlipCurrentResidualNorm(rOther.mLMTangentSlipCurrentResidualNorm)
,mStickCounter(rOther.mStickCounter)
,mSlipCounter(rOther.mSlipCounter)
,mNormalTangentRatio(rOther.mNormalTangentRatio)
,mActiveDofs(rOther.mActiveDofs)
{
}
/// Destructor (defaulted: all members are self-cleaning value types).
~DisplacementLagrangeMultiplierResidualFrictionalContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
 * @brief Compute relative and absolute error after a solution update.
 * @details Splits the RHS residual into a displacement part and normal/stick/slip
 * Lagrange-multiplier parts, tracks reference (first-iteration) norms per part, and
 * declares convergence when each part meets its ratio OR absolute tolerance.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual)
 * @return true if convergence is achieved, false otherwise
 */
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
// Getting process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Initialize the squared-norm accumulators and DoF counters for each residual part
TDataType disp_residual_solution_norm = 0.0, normal_lm_residual_solution_norm = 0.0, tangent_lm_stick_residual_solution_norm = 0.0, tangent_lm_slip_residual_solution_norm = 0.0;
IndexType disp_dof_num(0),lm_dof_num(0), lm_stick_dof_num(0), lm_slip_dof_num(0);
// The nodes array
auto& r_nodes_array = rModelPart.Nodes();
// First iterator
const auto it_dof_begin = rDofSet.begin();
// Auxiliar values (firstprivate per OpenMP thread below)
std::size_t dof_id = 0;
TDataType residual_dof_value = 0.0;
// The number of active dofs
const std::size_t number_active_dofs = rb.size();
// Loop over Dofs: each thread accumulates into the reduction variables
#pragma omp parallel for firstprivate(dof_id,residual_dof_value) reduction(+:disp_residual_solution_norm, normal_lm_residual_solution_norm, tangent_lm_stick_residual_solution_norm, tangent_lm_slip_residual_solution_norm, disp_dof_num, lm_dof_num, lm_stick_dof_num, lm_slip_dof_num)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
dof_id = it_dof->EquationId();
// Check dof id is solved (skip DoFs beyond the assembled system size)
if (dof_id < number_active_dofs) {
if (mActiveDofs[dof_id] == 1) {
// The component of the residual
residual_dof_value = rb[dof_id];
const auto& r_curr_var = it_dof->GetVariable();
if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
// Frictionless node (mu ~ 0): the whole component counts as normal residual
if (mu < ZeroTolerance) {
normal_lm_residual_solution_norm += std::pow(residual_dof_value, 2);
} else {
// NOTE(review): the normal/tangent split below scales each Cartesian component
// by the matching normal component; it assumes this per-component decomposition
// is the intended projection -- confirm against the contact formulation
const double normal_x = it_node->FastGetSolutionStepValue(NORMAL_X);
const TDataType normal_comp_residual = residual_dof_value * normal_x;
normal_lm_residual_solution_norm += std::pow(normal_comp_residual, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_residual_solution_norm += std::pow(residual_dof_value - normal_comp_residual, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_residual_solution_norm += std::pow(residual_dof_value - normal_comp_residual, 2);
++lm_stick_dof_num;
}
}
++lm_dof_num;
} else if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
if (mu < ZeroTolerance) {
normal_lm_residual_solution_norm += std::pow(residual_dof_value, 2);
} else {
const double normal_y = it_node->FastGetSolutionStepValue(NORMAL_Y);
const TDataType normal_comp_residual = residual_dof_value * normal_y;
normal_lm_residual_solution_norm += std::pow(normal_comp_residual, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_residual_solution_norm += std::pow(residual_dof_value - normal_comp_residual, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_residual_solution_norm += std::pow(residual_dof_value - normal_comp_residual, 2);
++lm_stick_dof_num;
}
}
++lm_dof_num;
} else if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
if (mu < ZeroTolerance) {
normal_lm_residual_solution_norm += std::pow(residual_dof_value, 2);
} else {
const double normal_z = it_node->FastGetSolutionStepValue(NORMAL_Z);
const TDataType normal_comp_residual = residual_dof_value * normal_z;
normal_lm_residual_solution_norm += std::pow(normal_comp_residual, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_residual_solution_norm += std::pow(residual_dof_value - normal_comp_residual, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_residual_solution_norm += std::pow(residual_dof_value - normal_comp_residual, 2);
++lm_stick_dof_num;
}
}
++lm_dof_num;
} else { // We will assume is displacement dof
disp_residual_solution_norm += residual_dof_value * residual_dof_value;
++disp_dof_num;
}
}
}
}
// Auxiliar dofs counters: when the stick/slip DoF population appears or vanishes
// between iterations, the corresponding reference residual must be recomputed
if (mStickCounter > 0) {
if (lm_stick_dof_num == 0) {
mStickCounter = 0;
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false);
}
} else {
if (lm_stick_dof_num > 0) {
mStickCounter = lm_stick_dof_num;
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false);
}
}
if (mSlipCounter > 0) {
if (lm_slip_dof_num == 0) {
mSlipCounter = 0;
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false);
}
} else {
if (lm_slip_dof_num > 0) {
mSlipCounter = lm_slip_dof_num;
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false);
}
}
// NOTE: these "norms" are the accumulated SQUARED norms (no sqrt is taken anywhere)
mDispCurrentResidualNorm = disp_residual_solution_norm;
mLMNormalCurrentResidualNorm = normal_lm_residual_solution_norm;
mLMTangentStickCurrentResidualNorm = tangent_lm_stick_residual_solution_norm;
mLMTangentSlipCurrentResidualNorm = tangent_lm_slip_residual_solution_norm;
TDataType residual_disp_ratio = 1.0;
TDataType residual_normal_lm_ratio = 1.0;
TDataType residual_tangent_lm_stick_ratio = 1.0;
TDataType residual_tangent_lm_slip_ratio = 1.0;
// We initialize the solution (capture the reference displacement norm once per step)
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
mDispInitialResidualNorm = (disp_residual_solution_norm < ZeroTolerance) ? 1.0 : disp_residual_solution_norm;
residual_disp_ratio = 1.0;
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
}
// We calculate the ratio of the displacements
residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;
// We initialize the solution (reference normal-LM norm)
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET)) {
mLMNormalInitialResidualNorm = (normal_lm_residual_solution_norm < ZeroTolerance) ? 1.0 : normal_lm_residual_solution_norm;
residual_normal_lm_ratio = 1.0;
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET, true);
}
// We calculate the ratio of the normal LM
residual_normal_lm_ratio = mLMNormalCurrentResidualNorm/mLMNormalInitialResidualNorm;
// We initialize the solution (stick/slip references only when such DoFs exist)
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET) && lm_stick_dof_num > 0) {
mLMTangentStickInitialResidualNorm = (tangent_lm_stick_residual_solution_norm < ZeroTolerance) ? 1.0 : tangent_lm_stick_residual_solution_norm;
residual_tangent_lm_stick_ratio = 1.0;
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, true);
}
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET) && lm_slip_dof_num > 0) {
mLMTangentSlipInitialResidualNorm = (tangent_lm_slip_residual_solution_norm < ZeroTolerance) ? 1.0 : tangent_lm_slip_residual_solution_norm;
residual_tangent_lm_slip_ratio = 1.0;
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, true);
}
// We calculate the ratio of the tangent LM (0.0 when no stick/slip DoFs exist,
// which makes the corresponding check below trivially pass)
if (lm_stick_dof_num > 0) {
residual_tangent_lm_stick_ratio = mLMTangentStickCurrentResidualNorm/mLMTangentStickInitialResidualNorm;
} else {
residual_tangent_lm_stick_ratio = 0.0;
}
if (lm_slip_dof_num > 0) {
residual_tangent_lm_slip_ratio = mLMTangentSlipCurrentResidualNorm/mLMTangentSlipInitialResidualNorm;
} else {
residual_tangent_lm_slip_ratio = 0.0;
}
KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ENSURE_CONTACT) && residual_normal_lm_ratio < ZeroTolerance) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
// We calculate the absolute norms (averaged per DoF)
const TDataType residual_disp_abs = mDispCurrentResidualNorm/static_cast<TDataType>(disp_dof_num);
const TDataType residual_normal_lm_abs = mLMNormalCurrentResidualNorm/static_cast<TDataType>(lm_dof_num);
// NOTE(review): the stick/slip absolute norms divide by the TOTAL LM DoF count
// (lm_dof_num) while the commented-out variants divide by the stick/slip counts;
// confirm which averaging is intended before removing the commented lines
const TDataType residual_tangent_lm_stick_abs = lm_stick_dof_num > 0 ? mLMTangentStickCurrentResidualNorm/static_cast<TDataType>(lm_dof_num) : 0.0;
// const TDataType residual_tangent_lm_stick_abs = lm_stick_dof_num > 0 ? mLMTangentStickCurrentResidualNorm/static_cast<TDataType>(lm_stick_dof_num) : 0.0;
const TDataType residual_tangent_lm_slip_abs = lm_slip_dof_num > 0 ? mLMTangentSlipCurrentResidualNorm/static_cast<TDataType>(lm_dof_num) : 0.0;
// const TDataType residual_tangent_lm_slip_abs = lm_slip_dof_num > 0 ? mLMTangentSlipCurrentResidualNorm/static_cast<TDataType>(lm_slip_dof_num) : 0.0;
const TDataType normal_tangent_stick_ratio = residual_tangent_lm_stick_abs/residual_normal_lm_abs;
const TDataType normal_tangent_slip_ratio = residual_tangent_lm_slip_abs/residual_normal_lm_abs;
// We print the results // TODO: Replace for the new log
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
// The stick columns are only present when PURE_SLIP is disabled (see Initialize)
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) {
r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_normal_lm_ratio << mLMNormalRatioTolerance << residual_normal_lm_abs << mLMNormalAbsTolerance << residual_tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << residual_tangent_lm_stick_abs << mLMTangentStickAbsTolerance << residual_tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << residual_tangent_lm_slip_abs << mLMTangentSlipAbsTolerance;
} else {
r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_normal_lm_ratio << mLMNormalRatioTolerance << residual_normal_lm_abs << mLMNormalAbsTolerance << residual_tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << residual_tangent_lm_slip_abs << mLMTangentSlipAbsTolerance;
}
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tNORMAL LAGRANGE MUL: RATIO = ") << residual_normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" ABS = ") << residual_normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO_IF("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria", mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) << BOLDFONT("\tSTICK LAGRANGE MUL: RATIO = ") << residual_tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentStickRatioTolerance << BOLDFONT(" ABS = ") << residual_tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentStickAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tSLIP LAGRANGE MUL: RATIO = ") << residual_tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentSlipRatioTolerance << BOLDFONT(" ABS = ") << residual_tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentSlipAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tNORMAL LAGRANGE MUL: RATIO = " << residual_normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << residual_normal_lm_abs << " EXP.ABS = " << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO_IF("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria", mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) << "\tSTICK LAGRANGE MUL: RATIO = " << residual_tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentStickRatioTolerance << " ABS = " << residual_tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentStickAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tSLIP LAGRANGE MUL: RATIO = " << residual_tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentSlipRatioTolerance << " ABS = " << residual_tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentSlipAbsTolerance << std::endl;
}
}
}
// NOTE: Here we don't include the tangent counter part
r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_normal_lm_ratio) ? residual_disp_ratio : residual_normal_lm_ratio;
r_process_info[RESIDUAL_NORM] = (residual_normal_lm_abs > mLMNormalAbsTolerance) ? residual_normal_lm_abs : mLMNormalAbsTolerance;
// We check if converged: each part passes on ratio OR absolute tolerance; the
// tangent parts may alternatively pass via the normal/tangent ratio threshold
const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ENSURE_CONTACT) && residual_normal_lm_ratio == 0.0) ? true : (residual_normal_lm_ratio <= mLMNormalRatioTolerance || residual_normal_lm_abs <= mLMNormalAbsTolerance) && (residual_tangent_lm_stick_ratio <= mLMTangentStickRatioTolerance || residual_tangent_lm_stick_abs <= mLMTangentStickAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) && (residual_tangent_lm_slip_ratio <= mLMTangentSlipRatioTolerance || residual_tangent_lm_slip_abs <= mLMTangentSlipAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio);
if (disp_converged && lm_converged ) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FGRN(" Achieved"));
else
r_table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tResidual convergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FRED(" Not achieved"));
else
r_table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
}
}
return false;
}
} else { // In this case all the displacements are imposed!
return true;
}
}
/**
 * @brief This function initialize the convergence criteria
 * @details When a table utility is present in the process info, it registers the
 * output columns (once) for each residual part checked by this criteria.
 * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
 */
void Initialize( ModelPart& rModelPart) override
{
    BaseType::mConvergenceCriteriaIsInitialized = true;

    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

    // Nothing else to do unless a table exists and its columns were never registered
    if (!r_process_info.Has(TABLE_UTILITY) || mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::TABLE_IS_INITIALIZED))
        return;

    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
    auto& r_table = p_table->GetTable();

    // Each residual part gets the same 4-column group: ratio, expected ratio,
    // absolute value, expected absolute value
    const auto add_residual_columns = [&r_table](const char* pRatioHeader) {
        r_table.AddColumn(pRatioHeader, 10);
        r_table.AddColumn("EXP. RAT", 10);
        r_table.AddColumn("ABS", 10);
        r_table.AddColumn("EXP. ABS", 10);
    };

    add_residual_columns("DP RATIO");
    add_residual_columns("N.LM RATIO");
    // Stick columns only make sense when stick states are possible
    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) {
        add_residual_columns("STI. RATIO");
    }
    add_residual_columns("SLIP RATIO");
    r_table.AddColumn("CONVERGENCE", 15);

    mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::TABLE_IS_INITIALIZED, true);
}
/**
 * @brief This function initializes the solution step
 * @details Clears the captured reference residual norms so they are re-sampled on the
 * first check of the new step, and refreshes the active DoF bookkeeping.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual)
 */
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
    // Invalidate every "initial residual captured" flag (static members of this
    // class, hence referable without qualification)
    mOptions.Set(INITIAL_RESIDUAL_IS_SET, false);
    mOptions.Set(INITIAL_NORMAL_RESIDUAL_IS_SET, false);
    mOptions.Set(INITIAL_STICK_RESIDUAL_IS_SET, false);
    mOptions.Set(INITIAL_SLIP_RESIDUAL_IS_SET, false);

    // Recompute which DoFs are active (takes multipoint constraints into account)
    ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}
/**
 * @brief This function finalizes the non-linear iteration
 * @details After delegating to the base criteria, marks the active set as not yet
 * computed so it is re-evaluated in the following iteration.
 * @param rModelPart Reference to the ModelPart containing the problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual + reactions)
 */
void FinalizeNonLinearIteration(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
    // Base class bookkeeping first
    BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);

    // Flag the active set for recomputation
    rModelPart.GetProcessInfo().SetValue(ACTIVE_SET_COMPUTED, false);
}
///@}
///@name Operations
///@{
///@}
///@name Acces
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags (ENSURE_CONTACT, PURE_SLIP, PRINTING_OUTPUT, table/initial-residual state)
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual
TDataType mDispInitialResidualNorm; /// The reference (first-iteration) norm of the displacement residual
TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual
TDataType mLMNormalRatioTolerance; /// The ratio threshold for the norm of the normal LM residual
TDataType mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the normal LM residual
TDataType mLMNormalInitialResidualNorm; /// The reference norm of the normal LM residual
TDataType mLMNormalCurrentResidualNorm; /// The current norm of the normal LM residual
TDataType mLMTangentStickRatioTolerance; /// The ratio threshold for the norm of the tangent LM residual (stick)
TDataType mLMTangentStickAbsTolerance; /// The absolute value threshold for the norm of the tangent LM residual (stick)
TDataType mLMTangentSlipRatioTolerance; /// The ratio threshold for the norm of the tangent LM residual (slip)
TDataType mLMTangentSlipAbsTolerance; /// The absolute value threshold for the norm of the tangent LM residual (slip)
TDataType mLMTangentStickInitialResidualNorm; /// The reference norm of the tangent LM residual (stick)
TDataType mLMTangentStickCurrentResidualNorm; /// The current norm of the tangent LM residual (stick)
TDataType mLMTangentSlipInitialResidualNorm; /// The reference norm of the tangent LM residual (slip)
TDataType mLMTangentSlipCurrentResidualNorm; /// The current norm of the tangent LM residual (slip)
std::size_t mStickCounter = 0; /// Auxiliary counter of stick DoFs seen in the previous check (detects stick-set changes)
std::size_t mSlipCounter = 0; /// Auxiliary counter of slip DoFs seen in the previous check (detects slip-set changes)
TDataType mNormalTangentRatio; /// The tangent/normal ratio below which a non-converged tangent component is still accepted
std::vector<int> mActiveDofs; /// Per-equation-id flags (1 = active) filled by ConstraintUtilities::ComputeActiveDofs
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierResidualFrictionalContactCriteria
///@name Local flags creation
///@{
/// Local Flags: out-of-class definitions of the criteria's static flags; each flag
/// occupies a distinct bit position (0-7) within the mOptions Flags object.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_NORMAL_RESIDUAL_IS_SET(Kratos::Flags::Create(5));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_STICK_RESIDUAL_IS_SET(Kratos::Flags::Create(6));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_SLIP_RESIDUAL_IS_SET(Kratos::Flags::Create(7));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_FRICTIONAL_CONTACT_CRITERIA_H */
|
word2vec.c | // Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <math.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define MAX_STRING 100
#define EXP_TABLE_SIZE 1000
#define MAX_EXP 6
#define MAX_SENTENCE_LENGTH 1000
#define MAX_CODE_LENGTH 40
const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary
typedef float real; // Precision of float numbers
struct vocab_word {
long long cn;
int *point;
char *word, *code, codelen;
};
char train_file[MAX_STRING], output_file[MAX_STRING];
char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];
struct vocab_word *vocab;
int binary = 0, cbow = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1;
int *vocab_hash;
long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100;
long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0;
real alpha = 0.025, starting_alpha, sample = 1e-3;
real *syn0, *syn1, *syn1neg, *expTable;
clock_t start;
int hs = 0, negative = 5;
const int table_size = 1e8;
int *table;
// Builds the negative-sampling table: word i occupies a share of the table
// proportional to cn(i)^power, so sampling a uniform index draws words from the
// smoothed unigram distribution.
void InitUnigramTable() {
  int a, i;
  double train_words_pow = 0;
  double d1, power = 0.75;
  table = (int *)malloc(table_size * sizeof(int));
  // table_size ints is ~400 MB; abort cleanly instead of crashing on NULL deref
  if (table == NULL) {
    fprintf(stderr, "cannot allocate memory for the unigram table\n");
    exit(1);
  }
  for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power);
  i = 0;
  // d1 is the cumulative (normalized) probability mass up to and including word i
  d1 = pow(vocab[i].cn, power) / train_words_pow;
  for (a = 0; a < table_size; a++) {
    table[a] = i;
    if (a / (double)table_size > d1) {
      i++;
      d1 += pow(vocab[i].cn, power) / train_words_pow;
    }
    // Clamp in case of floating-point drift at the end of the table
    if (i >= vocab_size) i = vocab_size - 1;
  }
}
// Reads a single word from a file, assuming space + tab + EOL to be word boundaries.
// On newline with an empty word buffer, returns the sentence marker "</s>".
// FIX: test fgetc's return value against EOF directly; the previous
// `while (!feof(fin))` form appended the EOF value (-1) as a byte to the word
// before the loop noticed end-of-file.
void ReadWord(char *word, FILE *fin) {
  int a = 0, ch;
  while ((ch = fgetc(fin)) != EOF) {
    if (ch == 13) continue; // skip carriage returns (CRLF input)
    if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
      if (a > 0) {
        // Push the newline back so the next call emits "</s>"
        if (ch == '\n') ungetc(ch, fin);
        break;
      }
      if (ch == '\n') {
        strcpy(word, (char *)"</s>");
        return;
      } else continue; // leading whitespace: keep scanning
    }
    word[a] = ch;
    a++;
    if (a >= MAX_STRING - 1) a--; // Truncate too long words
  }
  word[a] = 0;
}
// Returns hash value of a word (polynomial rolling hash, base 257, reduced
// modulo vocab_hash_size).
// FIX: scan to the NUL terminator instead of calling strlen() in the loop
// condition, which re-walked the string on every iteration (O(n^2)). The
// arithmetic is unchanged (each char promoted exactly as before), so hash
// values are identical to the original.
int GetWordHash(char *word) {
  unsigned long long hash = 0;
  const char *p;
  for (p = word; *p != '\0'; p++) hash = hash * 257 + *p;
  return (int)(hash % vocab_hash_size);
}
// Returns position of a word in the vocabulary; if the word is not found, returns -1.
// Open addressing with linear probing: an empty slot (-1) proves absence.
int SearchVocab(char *word) {
  unsigned int hash = GetWordHash(word);
  for (;;) {
    const int pos = vocab_hash[hash];
    if (pos == -1) return -1;
    if (strcmp(word, vocab[pos].word) == 0) return pos;
    hash = (hash + 1) % vocab_hash_size;
  }
}
// Reads the next word from the stream and returns its vocabulary index,
// or -1 when the stream is exhausted.
int ReadWordIndex(FILE *fin) {
  char word[MAX_STRING];
  ReadWord(word, fin);
  return feof(fin) ? -1 : SearchVocab(word);
}
// Adds a word to the vocabulary
// Appends a word to the vocabulary (count initialised to 0) and inserts it
// into the hash table. Returns the new word's index. Grows the vocab array
// in chunks of 1000 entries as needed.
int AddWordToVocab(char *word) {
  unsigned int hash, length = strlen(word) + 1;
  if (length > MAX_STRING) length = MAX_STRING; // clamp over-long words
  vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
  if (vocab[vocab_size].word == NULL) {
    fprintf(stderr, "Memory allocation failed\n");
    exit(1);
  }
  // Copy at most length-1 chars and terminate: the original strcpy wrote the
  // full word and could overflow the clamped buffer when strlen(word)+1
  // exceeded MAX_STRING.
  memcpy(vocab[vocab_size].word, word, length - 1);
  vocab[vocab_size].word[length - 1] = 0;
  vocab[vocab_size].cn = 0;
  vocab_size++;
  // Reallocate memory if needed; keep the old pointer valid on failure.
  if (vocab_size + 2 >= vocab_max_size) {
    vocab_max_size += 1000;
    struct vocab_word *tmp = (struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
    if (tmp == NULL) {
      fprintf(stderr, "Memory allocation failed\n");
      exit(1);
    }
    vocab = tmp;
  }
  hash = GetWordHash(word);
  // Linear probing to find a free slot.
  while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
  vocab_hash[hash] = vocab_size - 1;
  return vocab_size - 1;
}
// Used later for sorting by word counts
// qsort comparator: sorts words by count, descending. Uses explicit
// comparisons instead of `b->cn - a->cn` because the long long difference
// was truncated to int and could overflow for very large counts.
int VocabCompare(const void *a, const void *b) {
  long long cna = ((struct vocab_word *)a)->cn;
  long long cnb = ((struct vocab_word *)b)->cn;
  if (cnb > cna) return 1;
  if (cnb < cna) return -1;
  return 0;
}
// Sorts the vocabulary by frequency using word counts
// Sorts the vocabulary by descending count, drops words below min_count,
// rebuilds the hash table, recomputes train_words, and allocates the
// per-word Huffman code/point buffers.
void SortVocab() {
int a, size;
unsigned int hash;
// Sort the vocabulary and keep </s> at the first position
qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
size = vocab_size;
train_words = 0;
for (a = 0; a < size; a++) {
// Words occurring less than min_count times will be discarded from the vocab.
// Because the array is sorted, all discarded words sit at the tail, so
// decrementing vocab_size is enough to drop them.
if ((vocab[a].cn < min_count) && (a != 0)) {
vocab_size--;
free(vocab[a].word);
} else {
// Hash will be re-computed, as after the sorting it is not valid anymore
hash=GetWordHash(vocab[a].word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = a;
train_words += vocab[a].cn;
}
}
// NOTE(review): realloc return value is not checked; on failure this would
// leak and NULL-deref — confirm whether upstream policy is abort-on-OOM.
vocab = (struct vocab_word *)realloc(vocab, (vocab_size + 1) * sizeof(struct vocab_word));
// Allocate memory for the binary tree construction
for (a = 0; a < vocab_size; a++) {
vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char));
vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int));
}
}
// Reduces the vocabulary by removing infrequent tokens
// Shrinks the vocabulary in place by removing words with count <= min_reduce,
// then rebuilds the hash table. min_reduce is raised after each call so
// repeated invocations prune progressively more aggressively.
void ReduceVocab() {
int a, b = 0;
unsigned int hash;
// Compact surviving entries to the front of the array; free the rest.
for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
vocab[b].cn = vocab[a].cn;
vocab[b].word = vocab[a].word;
b++;
} else free(vocab[a].word);
vocab_size = b;
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
for (a = 0; a < vocab_size; a++) {
// Hash will be re-computed, as it is no longer valid after compaction
hash = GetWordHash(vocab[a].word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = a;
}
fflush(stdout);
min_reduce++;
}
// Create binary Huffman tree using the word counts
// Frequent words will have short uniqe binary codes
// Builds a Huffman tree over word counts and stores, for every word, its
// binary code (vocab[a].code) and the list of internal-node indices on the
// path from the root (vocab[a].point). Exploits that counts are already
// sorted descending: pos1 walks leaves right-to-left, pos2 walks newly
// created internal nodes left-to-right, so the two smallest nodes are always
// at one of these two cursors.
void CreateBinaryTree() {
long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH];
char code[MAX_CODE_LENGTH];
long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn;
// Internal-node slots start at a sentinel weight larger than any real count.
for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15;
pos1 = vocab_size - 1;
pos2 = vocab_size;
// Following algorithm constructs the Huffman tree by adding one node at a time
for (a = 0; a < vocab_size - 1; a++) {
// First, find two smallest nodes 'min1, min2'
if (pos1 >= 0) {
if (count[pos1] < count[pos2]) {
min1i = pos1;
pos1--;
} else {
min1i = pos2;
pos2++;
}
} else {
min1i = pos2;
pos2++;
}
if (pos1 >= 0) {
if (count[pos1] < count[pos2]) {
min2i = pos1;
pos1--;
} else {
min2i = pos2;
pos2++;
}
} else {
min2i = pos2;
pos2++;
}
count[vocab_size + a] = count[min1i] + count[min2i];
parent_node[min1i] = vocab_size + a;
parent_node[min2i] = vocab_size + a;
binary[min2i] = 1; // the heavier child gets bit 1
}
// Now assign binary code to each vocabulary word by walking leaf -> root,
// then writing the collected bits/nodes out in reverse (root -> leaf).
for (a = 0; a < vocab_size; a++) {
b = a;
i = 0;
while (1) {
code[i] = binary[b];
point[i] = b;
i++;
b = parent_node[b];
if (b == vocab_size * 2 - 2) break; // reached the root
}
vocab[a].codelen = i;
vocab[a].point[0] = vocab_size - 2; // root, expressed as internal-node index
for (b = 0; b < i; b++) {
vocab[a].code[i - b - 1] = code[b];
vocab[a].point[i - b] = point[b] - vocab_size;
}
}
free(count);
free(binary);
free(parent_node);
}
// Scans the training file once, counting word occurrences to build the
// vocabulary. "</s>" is added first so it keeps index 0; the hash table is
// pruned via ReduceVocab when it gets ~70% full. Also records file_size for
// later per-thread seeking.
void LearnVocabFromTrainFile() {
char word[MAX_STRING];
FILE *fin;
long long a, i;
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
fin = fopen(train_file, "rb");
if (fin == NULL) {
printf("ERROR: training data file not found!\n");
exit(1);
}
vocab_size = 0;
AddWordToVocab((char *)"</s>");
while (1) {
ReadWord(word, fin);
if (feof(fin)) break;
train_words++;
// Progress indicator: \r (char 13) rewrites the same console line.
if ((debug_mode > 1) && (train_words % 100000 == 0)) {
printf("%lldK%c", train_words / 1000, 13);
fflush(stdout);
}
i = SearchVocab(word);
if (i == -1) {
a = AddWordToVocab(word);
vocab[a].cn = 1;
} else vocab[i].cn++;
if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
}
SortVocab();
if (debug_mode > 0) {
printf("Vocab size: %lld\n", vocab_size);
printf("Words in train file: %lld\n", train_words);
}
file_size = ftell(fin);
fclose(fin);
}
// Writes the vocabulary as "word count" lines to save_vocab_file.
void SaveVocab() {
  long long i;
  FILE *fo = fopen(save_vocab_file, "wb");
  if (fo == NULL) { // original never checked fopen; fprintf(NULL, ...) is UB
    fprintf(stderr, "ERROR: cannot open vocabulary file for writing\n");
    exit(1);
  }
  for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
  fclose(fo);
}
// Loads a previously saved vocabulary ("word count" lines) instead of
// counting the training file, then records train_file's size for the
// per-thread seek offsets.
void ReadVocab() {
  long long a, i = 0;
  char c;
  char word[MAX_STRING];
  FILE *fin = fopen(read_vocab_file, "rb");
  if (fin == NULL) {
    printf("Vocabulary file not found\n");
    exit(1);
  }
  for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
  vocab_size = 0;
  while (1) {
    ReadWord(word, fin);
    if (feof(fin)) break;
    a = AddWordToVocab(word);
    // Stop on a malformed line instead of ignoring the fscanf result:
    // vocab[a].cn would otherwise be used uninitialised.
    if (fscanf(fin, "%lld%c", &vocab[a].cn, &c) != 2) break;
    i++;
  }
  fclose(fin); // original leaked this handle by reassigning fin below
  SortVocab();
  if (debug_mode > 0) {
    printf("Vocab size: %lld\n", vocab_size);
    printf("Words in train file: %lld\n", train_words);
  }
  fin = fopen(train_file, "rb");
  if (fin == NULL) {
    printf("ERROR: training data file not found!\n");
    exit(1);
  }
  fseek(fin, 0, SEEK_END);
  file_size = ftell(fin);
  fclose(fin);
}
// Allocates and initialises the network weights: syn0 (input embeddings,
// small random values), syn1 (hierarchical-softmax weights, zeroed) and
// syn1neg (negative-sampling weights, zeroed); then builds the Huffman tree.
void InitNet() {
  long long a, b;
  unsigned long long next_random = 1;
  // posix_memalign reports failure via its return value and may leave the
  // pointer untouched, so check the return code — the original stored it in
  // `a` and only tested the (possibly uninitialised) pointer.
  if (posix_memalign((void **)&syn0, 128, (long long)vocab_size * layer1_size * sizeof(real)) != 0) {
    printf("Memory allocation failed\n"); exit(1);
  }
  if (hs) {
    if (posix_memalign((void **)&syn1, 128, (long long)vocab_size * layer1_size * sizeof(real)) != 0) {
      printf("Memory allocation failed\n"); exit(1);
    }
    for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
      syn1[a * layer1_size + b] = 0;
  }
  if (negative > 0) {
    if (posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * layer1_size * sizeof(real)) != 0) {
      printf("Memory allocation failed\n"); exit(1);
    }
    for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
      syn1neg[a * layer1_size + b] = 0;
  }
  // Random init in [-0.5/dim, 0.5/dim) using word2vec's LCG.
  for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++) {
    next_random = next_random * (unsigned long long)25214903917 + 11;
    syn0[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size;
  }
  CreateBinaryTree();
}
// Per-thread training loop (hogwild: syn0/syn1/syn1neg are updated without
// locks; alpha and word_count_actual are shared and racy by design). Each
// thread seeks to its own slice of the training file and runs `iter` passes
// over it, training either CBOW or skip-gram with hierarchical softmax
// and/or negative sampling.
void *TrainModelThread(void *id) {
long long a, b, d, cw, word, last_word, sentence_length = 0, sentence_position = 0;
long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1];
long long l1, l2, c, target, label, local_iter = iter;
unsigned long long next_random = (long long)id; // per-thread RNG seed = thread id
real f, g;
clock_t now;
real *neu1 = (real *)calloc(layer1_size, sizeof(real));
real *neu1e = (real *)calloc(layer1_size, sizeof(real));
FILE *fi = fopen(train_file, "rb");
fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
while (1) {
// Every ~10k words: report progress and decay the (shared) learning rate
// linearly down to 0.01% of its starting value.
if (word_count - last_word_count > 10000) {
word_count_actual += word_count - last_word_count;
last_word_count = word_count;
if ((debug_mode > 1)) {
now=clock();
printf("%cAlpha: %f Progress: %.1f%% Words/thread/sec: %.1fk ", 13, alpha,
word_count_actual / (real)(iter * train_words + 1) * 100,
word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000));
fflush(stdout);
}
alpha = starting_alpha * (1 - word_count_actual / (real)(iter * train_words + 1));
if (alpha < starting_alpha * 0.0001) alpha = starting_alpha * 0.0001;
}
// Refill the sentence buffer when exhausted.
if (sentence_length == 0) {
while (1) {
word = ReadWordIndex(fi);
if (feof(fi)) break;
if (word == -1) continue;
word_count++;
if (word == 0) break;
// The subsampling randomly discards frequent words while keeping the ranking same
if (sample > 0) {
real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn;
next_random = next_random * (unsigned long long)25214903917 + 11;
if (ran < (next_random & 0xFFFF) / (real)65536) continue;
}
sen[sentence_length] = word;
sentence_length++;
if (sentence_length >= MAX_SENTENCE_LENGTH) break;
}
sentence_position = 0;
}
// End of this thread's slice: count down iterations, rewind, or finish.
if (feof(fi) || (word_count > train_words / num_threads)) {
word_count_actual += word_count - last_word_count;
local_iter--;
if (local_iter == 0) break;
word_count = 0;
last_word_count = 0;
sentence_length = 0;
fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
continue;
}
word = sen[sentence_position];
if (word == -1) continue;
for (c = 0; c < layer1_size; c++) neu1[c] = 0;
for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
next_random = next_random * (unsigned long long)25214903917 + 11;
b = next_random % window; // random window shrink: effective window = window - b
if (cbow) { //train the cbow architecture
// in -> hidden: average the context word vectors into neu1
cw = 0;
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
c = sentence_position - window + a;
if (c < 0) continue;
if (c >= sentence_length) continue;
last_word = sen[c];
if (last_word == -1) continue;
for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + last_word * layer1_size];
cw++;
}
if (cw) {
for (c = 0; c < layer1_size; c++) neu1[c] /= cw;
if (hs) for (d = 0; d < vocab[word].codelen; d++) {
f = 0;
l2 = vocab[word].point[d] * layer1_size;
// Propagate hidden -> output
for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2];
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
// 'g' is the gradient multiplied by the learning rate
g = (1 - vocab[word].code[d] - f) * alpha;
// Propagate errors output -> hidden
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
// Learn weights hidden -> output
for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c];
}
// NEGATIVE SAMPLING: one positive target plus `negative` draws from table
if (negative > 0) for (d = 0; d < negative + 1; d++) {
if (d == 0) {
target = word;
label = 1;
} else {
next_random = next_random * (unsigned long long)25214903917 + 11;
target = table[(next_random >> 16) % table_size];
if (target == 0) target = next_random % (vocab_size - 1) + 1;
if (target == word) continue;
label = 0;
}
l2 = target * layer1_size;
f = 0;
for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2];
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c];
}
// hidden -> in: distribute the accumulated gradient to every context word
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
c = sentence_position - window + a;
if (c < 0) continue;
if (c >= sentence_length) continue;
last_word = sen[c];
if (last_word == -1) continue;
for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c];
}
}
} else { //train skip-gram: each context word predicts the center word
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
c = sentence_position - window + a;
if (c < 0) continue;
if (c >= sentence_length) continue;
last_word = sen[c];
if (last_word == -1) continue;
l1 = last_word * layer1_size;
for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
// HIERARCHICAL SOFTMAX
if (hs) for (d = 0; d < vocab[word].codelen; d++) {
f = 0;
l2 = vocab[word].point[d] * layer1_size;
// Propagate hidden -> output
for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2];
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
// 'g' is the gradient multiplied by the learning rate
g = (1 - vocab[word].code[d] - f) * alpha;
// Propagate errors output -> hidden
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
// Learn weights hidden -> output
for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1];
}
// NEGATIVE SAMPLING
if (negative > 0) for (d = 0; d < negative + 1; d++) {
if (d == 0) {
target = word;
label = 1;
} else {
next_random = next_random * (unsigned long long)25214903917 + 11;
target = table[(next_random >> 16) % table_size];
if (target == 0) target = next_random % (vocab_size - 1) + 1;
if (target == word) continue;
label = 0;
}
l2 = target * layer1_size;
f = 0;
for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg[c + l2];
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * syn0[c + l1];
}
// Learn weights input -> hidden
for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c];
}
}
sentence_position++;
if (sentence_position >= sentence_length) {
sentence_length = 0;
continue;
}
}
fclose(fi);
free(neu1);
free(neu1e);
pthread_exit(NULL);
}
// Top-level training driver: builds/loads the vocabulary, initialises the
// network, runs the worker threads, then writes either the word vectors or
// (when -classes > 0) K-means cluster assignments to output_file.
void TrainModel() {
  long a, b, c, d;
  FILE *fo;
  pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
  if (pt == NULL) {printf("Memory allocation failed\n"); exit(1);}
  printf("Starting training using file %s\n", train_file);
  starting_alpha = alpha;
  if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile();
  if (save_vocab_file[0] != 0) SaveVocab();
  if (output_file[0] == 0) {free(pt); return;}
  InitNet();
  if (negative > 0) InitUnigramTable();
  start = clock();
  for (a = 0; a < num_threads; a++) pthread_create(&pt[a], NULL, TrainModelThread, (void *)a);
  for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL);
  free(pt); // original leaked the thread-handle array
  fo = fopen(output_file, "wb");
  if (fo == NULL) { // original never checked fopen; fprintf(NULL, ...) is UB
    printf("ERROR: cannot open output file!\n");
    exit(1);
  }
  if (classes == 0) {
    // Save the word vectors (binary or text, per the -binary flag)
    fprintf(fo, "%lld %lld\n", vocab_size, layer1_size);
    for (a = 0; a < vocab_size; a++) {
      fprintf(fo, "%s ", vocab[a].word);
      if (binary) for (b = 0; b < layer1_size; b++) fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fo);
      else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", syn0[a * layer1_size + b]);
      fprintf(fo, "\n");
    }
  } else {
    // Run K-means on the word vectors (fixed 10 iterations, cosine distance
    // via L2-normalised centroids).
    int clcn = classes, iter = 10, closeid;
    int *centcn = (int *)malloc(classes * sizeof(int));
    int *cl = (int *)calloc(vocab_size, sizeof(int));
    real closev, x;
    real *cent = (real *)calloc(classes * layer1_size, sizeof(real));
    if (centcn == NULL || cl == NULL || cent == NULL) {printf("Memory allocation failed\n"); exit(1);}
    for (a = 0; a < vocab_size; a++) cl[a] = a % clcn;
    for (a = 0; a < iter; a++) {
      // Accumulate member vectors per centroid.
      for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0;
      for (b = 0; b < clcn; b++) centcn[b] = 1;
      for (c = 0; c < vocab_size; c++) {
        for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c * layer1_size + d];
        centcn[cl[c]]++;
      }
      // Average and L2-normalise each centroid.
      for (b = 0; b < clcn; b++) {
        closev = 0;
        for (c = 0; c < layer1_size; c++) {
          cent[layer1_size * b + c] /= centcn[b];
          closev += cent[layer1_size * b + c] * cent[layer1_size * b + c];
        }
        closev = sqrt(closev);
        for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev;
      }
      // Reassign each word to its nearest centroid (max dot product).
      for (c = 0; c < vocab_size; c++) {
        closev = -10;
        closeid = 0;
        for (d = 0; d < clcn; d++) {
          x = 0;
          for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c * layer1_size + b];
          if (x > closev) {
            closev = x;
            closeid = d;
          }
        }
        cl[c] = closeid;
      }
    }
    // Save the K-means classes
    for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]);
    free(centcn);
    free(cent);
    free(cl);
  }
  fclose(fo);
}
// Returns the index of option `str` in argv, or -1 if it is absent.
// Exits with an error when the option is present but has no following
// value (i.e. it is the last argument).
int ArgPos(char *str, int argc, char **argv) {
  for (int idx = 1; idx < argc; idx++) {
    if (strcmp(str, argv[idx]) != 0) continue;
    if (idx == argc - 1) {
      printf("Argument missing for %s\n", str);
      exit(1);
    }
    return idx;
  }
  return -1;
}
// Entry point: prints usage when run without arguments, otherwise parses
// command-line flags into the globals, precomputes the sigmoid lookup table,
// and runs training.
int main(int argc, char **argv) {
int i;
if (argc == 1) {
printf("WORD VECTOR estimation toolkit v 0.1c\n\n");
printf("Options:\n");
printf("Parameters for training:\n");
printf("\t-train <file>\n");
printf("\t\tUse text data from <file> to train the model\n");
printf("\t-output <file>\n");
printf("\t\tUse <file> to save the resulting word vectors / word clusters\n");
printf("\t-size <int>\n");
printf("\t\tSet size of word vectors; default is 100\n");
printf("\t-window <int>\n");
printf("\t\tSet max skip length between words; default is 5\n");
printf("\t-sample <float>\n");
printf("\t\tSet threshold for occurrence of words. Those that appear with higher frequency in the training data\n");
printf("\t\twill be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)\n");
printf("\t-hs <int>\n");
printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n");
printf("\t-negative <int>\n");
printf("\t\tNumber of negative examples; default is 5, common values are 3 - 10 (0 = not used)\n");
printf("\t-threads <int>\n");
printf("\t\tUse <int> threads (default 12)\n");
printf("\t-iter <int>\n");
printf("\t\tRun more training iterations (default 5)\n");
printf("\t-min-count <int>\n");
printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
printf("\t-alpha <float>\n");
printf("\t\tSet the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW\n");
printf("\t-classes <int>\n");
printf("\t\tOutput word classes rather than word vectors; default number of classes is 0 (vectors are written)\n");
printf("\t-debug <int>\n");
printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
printf("\t-binary <int>\n");
printf("\t\tSave the resulting vectors in binary moded; default is 0 (off)\n");
printf("\t-save-vocab <file>\n");
printf("\t\tThe vocabulary will be saved to <file>\n");
printf("\t-read-vocab <file>\n");
printf("\t\tThe vocabulary will be read from <file>, not constructed from the training data\n");
printf("\t-cbow <int>\n");
printf("\t\tUse the continuous bag of words model; default is 1 (use 0 for skip-gram model)\n");
printf("\nExamples:\n");
printf("./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3\n\n");
return 0;
}
output_file[0] = 0;
save_vocab_file[0] = 0;
read_vocab_file[0] = 0;
// NOTE(review): these strcpy calls copy argv values into fixed-size buffers
// declared elsewhere; an over-long path would overflow them — verify buffer
// sizes (declared outside this view) or bound the copies.
if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]);
if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]);
if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, argv[i + 1]);
if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-cbow", argc, argv)) > 0) cbow = atoi(argv[i + 1]);
// CBOW uses a higher default learning rate; -alpha is parsed afterwards so
// an explicit value still wins.
if (cbow) alpha = 0.05;
if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]);
if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]);
if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]);
if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]);
vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word));
vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int));
expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real));
for (i = 0; i < EXP_TABLE_SIZE; i++) {
expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1)
}
TrainModel();
return 0;
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
  kFloat32 = 1,  // 32-bit IEEE float
  kDouble = 2,   // 64-bit IEEE float
  kUInt32 = 3,   // 32-bit unsigned integer
  kUInt64 = 4    // 64-bit unsigned integer
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of data fields in MetaInfo */
static constexpr uint64_t kNumField = 7;
/*! \brief number of rows in the data */
uint64_t num_row_{0};
/*! \brief number of columns in the data */
uint64_t num_col_{0};
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_group_t> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
/*! \brief default constructor */
MetaInfo() = default;
/*!
* \brief Deep-copy assignment: copies scalars and group_ptr_ directly and
* performs explicit Resize+Copy for the HostDeviceVector members.
* Note: label_order_cache_ is deliberately not copied; it is lazily
* rebuilt by LabelAbsSort when sizes no longer match.
*/
MetaInfo& operator=(MetaInfo const& that) {
this->num_row_ = that.num_row_;
this->num_col_ = that.num_col_;
this->num_nonzero_ = that.num_nonzero_;
this->labels_.Resize(that.labels_.Size());
this->labels_.Copy(that.labels_);
this->group_ptr_ = that.group_ptr_;
this->weights_.Resize(that.weights_.Size());
this->weights_.Copy(that.weights_);
this->base_margin_.Resize(that.base_margin_.Size());
this->base_margin_.Copy(that.base_margin_);
return *this;
}
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight; 1.0 when no weights were provided.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss).
* Cached: recomputed only when the cache size differs from labels_.Size(). */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*
* [ column_0, column_1, ... column_n ]
*
* Right now only 1 column is permitted.
*/
void SetInfo(const char* key, std::string const& interface_str);
private:
/*! \brief argsort of labels; mutable so LabelAbsSort can fill it lazily from a const context */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief compare entries by feature value, ascending (usable as a sort comparator) */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
/*! \brief equality: both index and value must match */
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief Parameters for constructing batches.
*/
struct BatchParam {
/*! \brief The GPU device to use. */
int gpu_id;
/*! \brief Maximum number of bins per feature for histograms. */
int max_bin { 0 };
/*! \brief Number of rows in a GPU batch, used for finding quantiles on GPU. */
int gpu_batch_nrows;
/*! \brief Page size for external memory mode. */
size_t gpu_page_size;
// NOTE(review): the defaulted constructor leaves gpu_id, gpu_batch_nrows and
// gpu_page_size uninitialized; operator!= on a default-constructed instance
// reads indeterminate values — confirm callers always use the full constructor.
BatchParam() = default;
BatchParam(int32_t device, int32_t max_bin, int32_t gpu_batch_nrows,
size_t gpu_page_size = 0) :
gpu_id{device},
max_bin{max_bin},
gpu_batch_nrows{gpu_batch_nrows},
gpu_page_size{gpu_page_size}
{}
/*! \brief member-wise inequality, used to detect parameter changes between batch requests */
inline bool operator!=(const BatchParam& other) const {
return gpu_id != other.gpu_id ||
max_bin != other.max_bin ||
gpu_batch_nrows != other.gpu_batch_nrows ||
gpu_page_size != other.gpu_page_size;
}
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
public:
// Offset for each row: offset[i]..offset[i+1] delimits row i's entries in `data` (CSR).
HostDeviceVector<bst_row_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
// Row id of the first row in this page (pages of one matrix are stacked).
size_t base_rowid{};
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch as a read-only span */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return Number of instances in the page. */
inline size_t Size() const {
return offset.Size() == 0 ? 0 : offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page; leaves the canonical empty CSR state (offset = {0}) */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
/*! \brief Set the base row id for this page. */
inline void SetBaseRowId(size_t row_id) {
base_rowid = row_id;
}
SparsePage GetTranspose(int num_columns) const;
/*! \brief Sort the entries of every segment by feature value, in parallel.
* Despite the variable name `ncol`, this iterates over all segments of the
* page — rows for CSR pages, columns for CSC pages (its main use). */
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
/**
* \brief Pushes external data batch onto this page
*
* \tparam AdapterBatchT
* \param batch
* \param missing
* \param nthread
*
* \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
*/
template <typename AdapterBatchT>
uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
};
/*! \brief Tag type: a SparsePage whose segments are columns (CSC layout). */
class CSCPage: public SparsePage {
public:
CSCPage() : SparsePage() {}
explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*! \brief Tag type: a CSC-layout SparsePage whose columns are sorted by feature value. */
class SortedCSCPage : public SparsePage {
public:
SortedCSCPage() : SparsePage() {}
explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
public:
/*!
* \brief Default constructor.
*
* This is used in the external memory case. An empty ELLPACK page is constructed with its content
* set later by the reader.
*/
EllpackPage();
/*!
* \brief Constructor from an existing DMatrix.
*
* This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
* in CSR format.
*/
explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
/*! \brief Destructor. Out-of-line because EllpackPageImpl is incomplete here (PImpl). */
~EllpackPage();
/*! \return Number of instances in the page. */
size_t Size() const;
/*! \brief Set the base row id for this page. */
void SetBaseRowId(size_t row_id);
/*! \brief Access the implementation (const). */
const EllpackPageImpl* Impl() const { return impl_.get(); }
/*! \brief Access the implementation (mutable). */
EllpackPageImpl* Impl() { return impl_.get(); }
private:
std::unique_ptr<EllpackPageImpl> impl_;
};
/*! \brief Abstract backend for BatchIterator: concrete sources implement
* dereference, advance and end-detection for one page type T. */
template<typename T>
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() = default;
virtual T& operator*() = 0;
virtual const T& operator*() const = 0;
virtual void operator++() = 0;
virtual bool AtEnd() const = 0;
};
/*! \brief Forward iterator over batches, wrapping a shared BatchIteratorImpl. */
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag;
// Takes ownership of impl (may be nullptr for the end iterator).
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
/*! \brief NOTE: ignores rhs entirely and only tests AtEnd(); this is only
* meaningful when comparing against the end() sentinel, as in a
* range-based for loop over BatchSet. */
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
// shared_ptr so copied iterators share the same underlying stream position.
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
/*! \brief Range object enabling `for (auto& page : dmat->GetBatches<T>())`;
* end() is a null-impl sentinel recognised by BatchIterator::operator!=. */
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
BatchIterator<T> begin() { return begin_iter_; }
BatchIterator<T> end() { return BatchIterator<T>(nullptr); }
private:
BatchIterator<T> begin_iter_;
};
/*!
* \brief This is data structure that user can pass to DMatrix::Create
* to create a DMatrix for training, user can create this data structure
* for customized Data Loading on single machine.
*
* On distributed setting, usually an customized dmlc::Parser is needed instead.
*/
/*! \brief User-providable data source: a dmlc::DataIter over pages of type T
* bundled with the dataset's MetaInfo. Passed to DMatrix::Create for
* customized single-machine data loading. */
template<typename T>
class DataSource : public dmlc::DataIter<T> {
public:
/*!
* \brief Meta information about the dataset
* The subclass need to be able to load this correctly from data.
*/
MetaInfo info;
};
/*!
* \brief Internal data structured used by XGBoost during training.
* There are two ways to create a customized DMatrix that reads in user defined-format.
*
* - Provide a dmlc::Parser and pass into the DMatrix::Create
* - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by
* DMLC_REGISTER_DATA_PARSER;
* - This works best for user defined data input source, such as data-base, filesystem.
* - Provide a DataSource, that can be passed to DMatrix::Create
* This can be used to re-use inmemory data structure into DMatrix.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
* Dispatches to one of the protected Get*Batches() overloads via the
* template specializations defined below in this header.
*/
template<typename T>
BatchSet<T> GetBatches(const BatchParam& param = {});
/*! \brief Whether a cached page of type T already exists (SparsePage/EllpackPage). */
template <typename T>
bool PageExists() const;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data columns single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*! \brief Whether the matrix is dense (every row stores every column). */
bool IsDense() const {
return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
}
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load in both local binary file.
* \param page_size Page size for external memory.
* \return The created DMatrix; caller owns the returned pointer.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
size_t page_size = kPageSize);
/**
* \brief Creates a new DMatrix from an external data adapter.
*
* \tparam AdapterT Type of the adapter.
* \param [in,out] adapter View onto an external data.
* \param missing Values to count as missing.
* \param nthread Number of threads for construction.
* \param cache_prefix (Optional) The cache prefix for external memory.
* \param page_size (Optional) Size of the page.
*
* \return a Created DMatrix; caller owns the returned pointer.
*/
template <typename AdapterT>
static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
protected:
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
virtual bool EllpackExists() const = 0;
virtual bool SparsePageExists() const = 0;
};
// Template specializations mapping the public GetBatches<T>() / PageExists<T>()
// entry points onto the corresponding protected virtual accessors of DMatrix.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetSortedColumnBatches();
}
// Only the Ellpack specialization actually consumes the BatchParam.
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
return GetEllpackBatches(param);
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif // XGBOOST_DATA_H_
|
ivf.c | /*
Copyright © INRIA 2010-2011.
Authors: Matthijs Douze & Herve Jegou
Contact: matthijs.douze@inria.fr herve.jegou@inria.fr
This software is a computer program whose purpose is to provide
efficient tools for basic yet computationally demanding tasks,
such as find k-nearest neighbors using exhaustive search
and kmeans clustering.
This software is governed by the CeCILL license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL license and that you accept its terms.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "ivf.h"
#include "hamming.h"
/* geometric re-allocation add a relative 50% of additional memory */
#define IVF_REALLOC_NEWSIZE(oldsize) (4+((oldsize * 3) / 2))
/* Return 1 when the structure carries the expected magic value, 0 otherwise. */
int ivf_checksum (const ivf_t * ivf)
{
  if (ivf->checksum == IVFCHECKSUM)
    return 1;
  return 0;
}
/* Create a new inverted file with k lists (one extra list is allocated so that
   1-based Matlab-style indexing works).  elemsize is the number of auxiliary
   bytes stored per entry; seg_size is the initial capacity of each list
   (0 selects DEFAULT_SEG_SIZE).  Returns NULL on allocation failure, in which
   case all partial allocations are released. */
ivf_t *ivf_new (int k, int elemsize, int seg_size)
{
  int i;
  if (seg_size == 0)
    seg_size = DEFAULT_SEG_SIZE;
  ivf_t *ivf = (ivf_t *) malloc (sizeof (ivf_t));
  if (!ivf)
    return NULL;
  ivf->checksum = IVFCHECKSUM;
  ivf->k = k+1; /* k+1 to be able to use the indexing convention of matlab */
  ivf->n = 0;
  ivf->elem_size = elemsize;
  ivf->nbelems = (int *) calloc (ivf->k, sizeof (int));
  ivf->seg_size = (int *) malloc (sizeof (int) * ivf->k);
  /* calloc the pointer tables so ivf_delete can run safely on a
     partially-constructed structure (it frees every entry, and free(NULL)
     is a no-op; with malloc the unset entries were indeterminate -> UB) */
  ivf->ids = (int **) calloc (ivf->k, sizeof (*ivf->ids));
  ivf->adat = (unsigned char **) calloc (ivf->k, sizeof (*ivf->adat));
  /* BUG FIX: the second check used to re-test ivf->ids instead of ivf->adat;
     nbelems and seg_size were never checked at all */
  if (!ivf->nbelems || !ivf->seg_size || !ivf->ids || !ivf->adat) {
    ivf_delete (ivf);
    return NULL;
  }
  /* a minimum allocated segment size by default equal to seg_size */
  for (i = 0; i < ivf->k; i++) {
    ivf->seg_size[i] = seg_size;
    ivf->nbelems[i] = 0;
    ivf->ids[i] = (int *) malloc (sizeof (**ivf->ids) * ivf->seg_size[i]);
    if (!ivf->ids[i]) {
      ivf_delete (ivf);
      return NULL;
    }
  }
  /* auxiliary data buffers; these are allocated even when elem_size == 0,
     matching the original behavior (a malloc(0) result is never dereferenced) */
  for (i = 0; i < ivf->k; i++) {
    ivf->adat[i] = (unsigned char *) malloc (ivf->elem_size * ivf->seg_size[i]);
    if (!ivf->adat[i]) {
      ivf_delete (ivf);
      return NULL;
    }
  }
  return ivf;
}
/* Release an inverted file and everything it owns.  NULL is accepted. */
void ivf_delete (ivf_t * ivf)
{
  int i;
  if (!ivf)
    return;
  if (ivf->ids) {
    for (i = 0; i < ivf->k; i++)
      free (ivf->ids[i]);
  }
  if (ivf->adat) {
    for (i = 0; i < ivf->k; i++)
      free (ivf->adat[i]);
  }
  free (ivf->nbelems);
  free (ivf->seg_size);
  free (ivf->ids);
  free (ivf->adat);
  free (ivf);
}
/* Append n entries to the inverted file.  ids[i] is stored in list keys[i];
   when elem_size > 0, the elem_size bytes at adat + i*elem_size are stored
   alongside it.  Lists grow geometrically as needed.  On an out-of-range key
   the function stops, reports the error, and ivf->n reflects only the
   entries actually inserted. */
void ivf_addn (ivf_t * ivf, const int * ids, const int * keys,
               const unsigned char * adat, const int n)
{
  int i, w, j, elem_size = ivf->elem_size;
  for (i = 0; i < n; i++) {
    w = keys[i];
    if (! (w >= 0 && w < ivf->k)) {
      fprintf (stderr, "# Invalid key id : %d (range must be %d-%d)\n",
               w, 0, ivf->k-1);
      /* BUG FIX: count the entries inserted before the bad key (the old code
         returned without updating ivf->n at all) */
      ivf->n += i;
      return;
    }
    /* First check if a realloc is required or not */
    if (ivf->nbelems[w] + 1 == ivf->seg_size[w]) { /* -> YES, it is */
      ivf->seg_size[w] = IVF_REALLOC_NEWSIZE (ivf->seg_size[w]);
      ivf->ids[w] = (int *) realloc (ivf->ids[w], (size_t) ivf->seg_size[w]
                                     * sizeof (*ivf->ids[w]));
      assert (ivf->ids[w]);
      /* BUG FIX: the old code executed `continue` here when elem_size == 0,
         which skipped the id insertion and the nbelems update entirely */
      if (elem_size > 0) {
        ivf->adat[w] = (unsigned char *) realloc (ivf->adat[w],
                                                  (size_t) ivf->seg_size[w] * elem_size);
        assert (ivf->adat[w]);
      }
    }
    j = ivf->nbelems[w];
    /* Store the id of the image or vector */
    ivf->ids[w][j] = ids[i];
    if (elem_size > 0)
      memcpy (ivf->adat[w] + (size_t) j * elem_size,
              adat + (size_t) i * elem_size, elem_size);
    /* update the number of elements */
    ivf->nbelems[w]++;
  }
  ivf->n += n;
}
/* Insert a single (key, id, val) entry; convenience wrapper around ivf_addn. */
void ivf_add (ivf_t * ivf, int key, int id, const unsigned char * val)
{
ivf_addn (ivf, &id, &key, val, 1);
}
/* Number of entries currently stored in inverted list `key`. */
int ivf_get_nb_elems (const ivf_t * ivf, int key)
{
assert (key >= 0 && key < ivf->k);
return ivf->nbelems[key];
}
/* Borrowed pointer to the vector identifiers of list `key` (owned by ivf; do not free). */
int * ivf_get_ids (const ivf_t * ivf, int key)
{
assert (key >= 0 && key < ivf->k);
return ivf->ids[key];
}
/* Borrowed pointer to the auxiliary data of list `key` (elem_size bytes per entry; do not free). */
unsigned char * ivf_get_vals (const ivf_t * ivf, int key)
{
assert (key >= 0 && key < ivf->k);
return ivf->adat[key];
}
/* Gather the auxiliary data of n (key, id) pairs into a freshly allocated
   buffer of n * elem_size bytes (caller frees).  Returns NULL if allocation
   fails or if some id is not found in its list.
   NOTE(review): consecutive queries sharing a key are resolved by continuing
   the scan from the previous hit, so same-key queries appear to require ids
   in list order — confirm against callers. */
unsigned char * ivf_find_vals (const ivf_t * ivf, int * keys, int * ids, int n)
{
  unsigned char * dat = (unsigned char *) malloc (ivf->elem_size * n);
  int j = 0, i, f;
  /* BUG FIX: the allocation was never checked before use */
  if (!dat)
    return NULL;
  while (j < n) {
    f = 0;
    for (i = 0; i < ivf->nbelems[keys[j]]; i++) {
      if (ivf->ids[keys[j]][i] == ids[j]) {
        f = 1;
        memcpy (dat + j * ivf->elem_size,
                ivf->adat[keys[j]] + i * ivf->elem_size, ivf->elem_size);
        j++;
        if (j >= n)
          break;
        if (keys[j] != keys[j-1])
          break;
      }
    }
    if (f != 1) /* id not found in the given key list, wrong input */
    {
      free (dat);
      return NULL;
    }
  }
  return dat;
}
/* Print a human-readable summary of every non-empty inverted list. */
void ivf_display (const ivf_t * ivf)
{
  int i, j;
  printf ("Nb lists %d\n", ivf->k);
  /* for each segment, display the contents */
  for (i = 0; i < ivf->k; i++) {
    if (ivf->nbelems[i] == 0)
      continue;
    fprintf (stdout, "[ List %d ] %d elements (seg_size: %d)\n", i,
             ivf->nbelems[i], ivf->seg_size[i]);
    for (j = 0; j < ivf->nbelems[i]; j++)
      printf ("%8d / ", (int) ivf->ids[i][j]);
    printf ("\n");
  }
}
/* Count the total number of elements (descriptors) in the inverted file */
int ivf_count_nbelems (const ivf_t * ivf)
{
  int i, total = 0;
  for (i = 0; i < ivf->k; i++)
    total += ivf->nbelems[i];
  return total;
}
/* Compute the imbalance factor of the inverted lists:
   n * sum(h_i^2) / (sum(h_i))^2, which is 1.0 for perfectly balanced lists.
   Returns 0.0 for an empty inverted file (the old code divided 0 by 0,
   producing NaN). */
double ivf_imbalance_factor (const ivf_t * ivf)
{
  int * hist = ivf->nbelems;
  int n = ivf->k, vw;
  double tot = 0, uf = 0;
  for (vw = 0; vw < n; vw++) {
    tot += hist[vw];
    uf += hist[vw] * (double) hist[vw];
  }
  if (tot == 0)
    return 0.0;
  return uf * n / (tot * tot);
}
/* I/O */
#define WRITECHECK(a,n) if(fwrite(a,sizeof(*a),n,f)!=n) {perror("ivf_fwrite"); fprintf (stderr, "ivfc - LINE %d\n", __LINE__); return 0; }
#define READCHECK(a,n) if(fread(a,sizeof(*a),n,f)!=n) {perror("ivf_read"); fprintf (stderr, "ivfc - LINE %d\n", __LINE__); return NULL; }
/* Serialize the inverted file to fname.  Returns 1 on success, 0 on failure.
   Layout: checksum, k, n, elem_size, nbelems[k], then the k id arrays,
   then (if elem_size > 0) the k auxiliary-data arrays. */
int ivf_save (const char * fname, const ivf_t * ivf)
{
  int i;
  /* BUG FIX: open in binary mode — the payload is raw ints/bytes and text
     mode would corrupt it on platforms with newline translation */
  FILE * f = fopen (fname, "wb");
  if (!f) {
    perror ("ivf_save - can't open the file");
    return 0;
  }
  if (fwrite (&ivf->checksum, sizeof (ivf->checksum), 1, f) != 1) goto write_error;
  if (fwrite (&ivf->k, sizeof (ivf->k), 1, f) != 1) goto write_error;
  if (fwrite (&ivf->n, sizeof (ivf->n), 1, f) != 1) goto write_error;
  if (fwrite (&ivf->elem_size, sizeof (ivf->elem_size), 1, f) != 1) goto write_error;
  if (fwrite (ivf->nbelems, sizeof (*ivf->nbelems), ivf->k, f)
      != (size_t) ivf->k) goto write_error;
  for (i = 0; i < ivf->k; i++)
    if (fwrite (ivf->ids[i], sizeof (*ivf->ids[i]), ivf->nbelems[i], f)
        != (size_t) ivf->nbelems[i]) goto write_error;
  /* optionally write the complementary information */
  if (ivf->elem_size > 0)
    for (i = 0; i < ivf->k; i++)
      if (fwrite (ivf->adat[i], 1, (size_t) ivf->elem_size * ivf->nbelems[i], f)
          != (size_t) ivf->elem_size * ivf->nbelems[i]) goto write_error;
  /* BUG FIX: check fclose — it flushes buffered data and may fail */
  if (fclose (f) != 0) {
    perror ("ivf_fwrite");
    return 0;
  }
  return 1;
write_error:
  /* BUG FIX: the old WRITECHECK macro returned without closing f (fd leak) */
  perror ("ivf_fwrite");
  fclose (f);
  return 0;
}
/* Deserialize an inverted file written by ivf_save.  Returns NULL on any
   error; all partial allocations and the FILE handle are released (the old
   code leaked both on every error path, left adat indeterminate when
   elem_size == 0, and its pointer-table check tested ivf->ids twice while
   never testing ivf->nbelems). */
ivf_t * ivf_load (const char * fname)
{
  int i;
  FILE * f = fopen (fname, "rb"); /* binary mode, matching ivf_save */
  if (!f) {
    fprintf (stderr, "Unable to open the file %s\n", fname);
    return NULL;
  }
  /* calloc so every pointer member is NULL: ivf_delete is then safe to call
     at any point during construction */
  ivf_t *ivf = (ivf_t *) calloc (1, sizeof (ivf_t));
  if (!ivf) {
    fclose (f);
    return NULL;
  }
  if (fread (&ivf->checksum, sizeof (ivf->checksum), 1, f) != 1) goto read_error;
  if (ivf->checksum != IVFCHECKSUM) {
    fprintf (stderr, "# ivf_fread: incorrect checksum\n");
    goto fail;
  }
  if (fread (&ivf->k, sizeof (ivf->k), 1, f) != 1) goto read_error;
  if (fread (&ivf->n, sizeof (ivf->n), 1, f) != 1) goto read_error;
  if (fread (&ivf->elem_size, sizeof (ivf->elem_size), 1, f) != 1) goto read_error;
  if (ivf->k <= 0) {
    fprintf (stderr, "# ivf_fread: incorrect checksum\n");
    goto fail;
  }
  ivf->nbelems = (int *) calloc (ivf->k, sizeof (int));
  ivf->seg_size = (int *) calloc (ivf->k, sizeof (int));
  ivf->ids = (int **) calloc (ivf->k, sizeof (*ivf->ids));
  if (!ivf->nbelems || !ivf->seg_size || !ivf->ids)
    goto fail;
  if (fread (ivf->nbelems, sizeof (*ivf->nbelems), ivf->k, f)
      != (size_t) ivf->k) goto read_error;
  for (i = 0; i < ivf->k; i++)
    ivf->seg_size[i] = ivf->nbelems[i] > DEFAULT_SEG_SIZE
                       ? ivf->nbelems[i] : DEFAULT_SEG_SIZE;
  for (i = 0; i < ivf->k; i++) {
    ivf->ids[i] = (int *) malloc (sizeof (**ivf->ids) * ivf->seg_size[i]);
    if (!ivf->ids[i])
      goto fail;
    if (fread (ivf->ids[i], sizeof (*ivf->ids[i]), ivf->nbelems[i], f)
        != (size_t) ivf->nbelems[i]) goto read_error;
  }
  /* optionally read the complementary information; when elem_size == 0,
     ivf->adat stays NULL (from calloc) and ivf_delete handles that */
  if (ivf->elem_size > 0) {
    ivf->adat = (unsigned char **) calloc (ivf->k, sizeof (*ivf->adat));
    if (!ivf->adat)
      goto fail;
    for (i = 0; i < ivf->k; i++) {
      ivf->adat[i] = (unsigned char *) malloc ((size_t) ivf->elem_size * ivf->seg_size[i]);
      if (!ivf->adat[i])
        goto fail;
      if (ivf->nbelems[i] > 0)
        if (fread (ivf->adat[i], 1, (size_t) ivf->elem_size * ivf->nbelems[i], f)
            != (size_t) ivf->elem_size * ivf->nbelems[i]) goto read_error;
    }
  }
  fclose (f);
  return ivf;
read_error:
  perror ("ivf_read");
fail:
  fclose (f);
  ivf_delete (ivf);
  return NULL;
}
#undef WRITECHECK
#undef READCHECK
/* Allocate an uninitialized array of n match records; the caller frees it. */
ivfmatch_t * ivfmatch_new (int n)
{
return (ivfmatch_t *) malloc (n * sizeof (ivfmatch_t));
}
/* Resize a match array previously obtained from ivfmatch_new. */
ivfmatch_t * ivfmatch_realloc (ivfmatch_t * m, int n)
{
return (ivfmatch_t *) realloc (m, n * sizeof (ivfmatch_t));
}
/* Compute the set of matches according to Hamming distance threshold
Parameters
ids may be anything (just used to identify the queries submitted)
keys the quantization indexes
adat the set of binary signatures associated with the queries
n the number of input query vectors
Returns a heap-allocated array of matches (caller frees).  On input,
*buffer_size is the initial capacity of the match buffer; on output it is
the number of matches actually produced.
*/
ivfmatch_t * ivf_hequery (const ivf_t * ivf,
const int * qids, const int * keys,
const unsigned char * adat, const int nq,
int * buffer_size, int ht)
{
int i, j, posm = 0;
int bufsize = *buffer_size;
int elem_size = ivf->elem_size;
ivfmatch_t * matches = ivfmatch_new (bufsize);
ivfmatch_t * m = matches; /* For arithmetic pointer optimization */
const unsigned char * qbs = adat;
unsigned int h;
for (i = 0 ; i < nq ; i++) {
const int qid = qids[i];
int listno = keys[i];
int listlen = ivf_get_nb_elems (ivf, listno);
int * listids = ivf->ids[listno];
const unsigned char * dbs = ivf->adat[listno];
for (j = 0 ; j < listlen ; j++) {
/* Here perform the real work of computing the distance */
h = hamming (qbs, dbs, elem_size);
/* collect the match only if this satisfies the threshold.
   NOTE(review): h is unsigned while ht is int, so ht is promoted to
   unsigned here — this assumes ht >= 0; confirm at call sites. */
if (h <= ht) {
/* Enough space to store another match ? Grow geometrically if not. */
if (posm >= bufsize) {
/* fprintf (stderr, "Realloc match buffer: %d -> %d\n", bufsize, IVF_REALLOC_NEWSIZE (bufsize)); */
bufsize = IVF_REALLOC_NEWSIZE (bufsize);
matches = ivfmatch_realloc (matches, bufsize);
assert (matches != NULL);
/* re-anchor the write cursor: realloc may have moved the buffer */
m = matches + posm;
}
m->qid = qid;
m->bid = listids[j];
m->score = h;
m++;
posm++;
}
dbs += elem_size; /* next signature in inverted list */
}
qbs += elem_size; /* next binary signature */
}
/* output the number of elements that have actually been selected */
*buffer_size = posm;
return matches;
}
/* Collect, for each of the nq queries, every entry of its inverted list
(keys[i]) whose Hamming distance to the query signature is <= ht.
Returns an array of nq per-query match lists (each heap-allocated; the
caller frees every hmlist[i] and then hmlist itself); nmatches[i] receives
the number of matches of query i.  The OpenMP and serial branches run the
same loop body; only the pragma differs. */
hammatch_t ** ivf_he_collect (const ivf_t * ivf, const int * keys,
const unsigned char * qbs, int nq,
int ht, size_t * nmatches)
{
int i, nbufinit = 512;
/* Match entities and number of matches per query */
hammatch_t ** hmlist = (hammatch_t **) malloc (sizeof(*hmlist) * nq);
#ifdef _OPENMP
#pragma omp parallel for private (i)
for (i = 0 ; i < nq ; i++) {
match_hamming_thres (qbs + i * ivf->elem_size, ivf->adat[keys[i]],
1, ivf_get_nb_elems (ivf, keys[i]), /* size of the inverted list */
ht, ivf->elem_size, nbufinit, hmlist+i, nmatches+i);
}
#else
for (i = 0 ; i < nq ; i++) {
match_hamming_thres (qbs + i * ivf->elem_size, ivf->adat[keys[i]],
1, ivf_get_nb_elems (ivf, keys[i]), /* size of the inverted list */
ht, ivf->elem_size, nbufinit, hmlist+i, nmatches+i);
}
#endif
return hmlist;
}
/* Query with per-distance score mapping and per-list weighting.
   score_map_ maps a Hamming distance (0..ht) to a score; NULL means use the
   raw distance.  list_w_ gives a per-list weight (typically an idf); NULL
   means weight 1 for every list.  On return, *totmatches is the number of
   matches and the returned array (caller frees) holds one record per match. */
ivfmatch_t * ivf_hequeryw (const ivf_t * ivf,
                           const int * qids, const int * keys,
                           const unsigned char * qbs, int nq,
                           int ht, size_t * totmatches,
                           const float * score_map_, const float * list_w_)
{
  size_t i, j;
  /* Match entities to count number of matches per query */
  size_t * nmatches = (size_t *) malloc (sizeof (*nmatches) * nq);
  hammatch_t ** hmlist = ivf_he_collect (ivf, keys, qbs, nq, ht, nmatches);
  /* compute the cumulative number of matches */
  size_t * cumnmatches = (size_t *) malloc (sizeof (*cumnmatches) * (nq + 1));
  cumnmatches[0] = 0;
  for (i = 0 ; i < (size_t) nq ; i++)
    cumnmatches[i+1] = nmatches[i] + cumnmatches[i];
  *totmatches = cumnmatches[nq];
  /* Populate the output structure */
  ivfmatch_t * matches = ivfmatch_new (*totmatches);
  /* if score_map is undefined, just return the Hamming distances;
     listweight should be NULL in this case to avoid an unexpected behavior */
  float * score_map = (float *) score_map_;
  if (score_map == NULL) {
    score_map = (float *) malloc ((ht + 1) * sizeof (*score_map));
    for (i = 0 ; i <= (size_t) ht ; i++)
      score_map[i] = i;
  }
  /* list_w is typically used for idf.  Set to 1 by default. */
  float * list_w = (float *) list_w_;
  if (list_w == NULL) {
    list_w = (float *) malloc (ivf->k * sizeof (*list_w));
    for (i = 0 ; i < (size_t) ivf->k ; i++)
      list_w[i] = 1;
  }
  for (i = 0 ; i < (size_t) nq ; i++) {
    const int * listids = ivf->ids[keys[i]];
    ivfmatch_t * m = matches + cumnmatches[i];
    hammatch_t * hm = hmlist[i];
    for (j = 0 ; j < nmatches[i] ; j++) {
      m->qid = qids[i];
      /* hm->bid is a position within the list; translate it to the vector id */
      m->bid = listids[hm->bid];
      m->score = score_map[hm->score] * list_w[keys[i]];
      m++;
      hm++;
    }
  }
  for (i = 0 ; i < (size_t) nq ; i++)
    free (hmlist[i]);
  free (hmlist);
  free (cumnmatches);
  free (nmatches);
  if (score_map_ == NULL)
    free (score_map);
  /* BUG FIX: the old code tested `list_w == NULL`, which is never true at
     this point, so the default weight table was leaked on every call */
  if (list_w_ == NULL)
    free (list_w);
  return matches;
}
/* Collect cross-matches with Hamming distance: for each inverted list,
find all pairs of entries within distance ht of each other.  Returns one
heap-allocated match list per inverted list (caller frees each hmlist[i]
and hmlist); nmatches[i] receives the pair count of list i.  The positions
returned by crossmatch_hamming are rewritten in place into vector ids. */
hammatch_t ** ivf_he_collect_crossmatches (const ivf_t * ivf, int ht, size_t * nmatches)
{
int i, nbufinit = 512;
/* Match entities and number of matches per query */
hammatch_t ** hmlist = (hammatch_t **) malloc (sizeof(*hmlist) * ivf->k);
#ifdef _OPENMP
#pragma omp parallel for private (i)
for (i = 0 ; i < ivf->k ; i++) {
crossmatch_hamming (ivf->adat[i], ivf_get_nb_elems (ivf, i),
ht, ivf->elem_size, nbufinit, hmlist+i, nmatches+i);
hammatch_t *m = hmlist[i];
const int * listids = ivf->ids[i];
long j, n = nmatches[i];
/* translate within-list positions into the stored vector identifiers */
for (j = 0 ; j < n ; j++) {
m->qid = listids[m->qid];
m->bid = listids[m->bid];
m++;
}
}
#else
for (i = 0 ; i < ivf->k ; i++) {
crossmatch_hamming (ivf->adat[i], ivf_get_nb_elems (ivf, i),
ht, ivf->elem_size, nbufinit, hmlist+i, nmatches+i);
hammatch_t *m = hmlist[i];
const int * listids = ivf->ids[i];
/* NOTE(review): this serial branch uses `int j` against size_t
nmatches[i], unlike the `long` used above — confirm counts fit in int */
int j;
for (j = 0 ; j < nmatches[i] ; j++) {
m->qid = listids[m->qid];
m->bid = listids[m->bid];
m++;
}
}
#endif
return hmlist;
}
/* Collect cross-matches with Hamming distance into caller-provided storage.
cumnmatches[i] is the cumulative pair count before list i (as produced by
ivf_he_count_crossmatches); idx receives 2 ints (the pair of vector ids)
per match, and hams the corresponding Hamming distance.  Positions written
by crossmatch_hamming_prealloc are rewritten in place into vector ids. */
void ivf_he_crossmatches_prealloc (const ivf_t * ivf, int ht,
int * idx, uint16 * hams,
size_t * cumnmatches)
{
long i;
#ifdef _OPENMP
#pragma omp parallel for private (i)
for (i = 0 ; i < ivf->k ; i++) {
crossmatch_hamming_prealloc (ivf->adat[i], ivf_get_nb_elems (ivf, i),
ht, ivf->elem_size,
idx + 2 * cumnmatches[i],
hams + cumnmatches[i]);
long n = cumnmatches[i+1] - cumnmatches[i];
int * m = idx + 2 * cumnmatches[i];
const int * listids = ivf->ids[i];
long j;
/* translate the two within-list positions of each pair into vector ids */
for (j = 0 ; j < n ; j++) {
*m = listids[*m]; m++;
*m = listids[*m]; m++;
}
}
#else
for (i = 0 ; i < ivf->k ; i++) {
int nout = crossmatch_hamming_prealloc (ivf->adat[i], ivf_get_nb_elems (ivf, i),
ht, ivf->elem_size,
idx + 2 * cumnmatches[i],
hams + cumnmatches[i]);
long n = cumnmatches[i+1] - cumnmatches[i];
/* sanity check: the count pass and the collect pass must agree */
assert (nout == n);
int * m = idx + 2 * cumnmatches[i];
const int * listids = ivf->ids[i];
long j;
for (j = 0 ; j < n ; j++) {
*m = listids[*m]; m++;
*m = listids[*m]; m++;
}
}
#endif
}
/* Count cross-matches with Hamming distance: nmatches[i] receives the number
   of within-list pairs of list i at distance <= ht.  The loop body is
   identical with and without OpenMP, so only the pragma is conditional. */
void ivf_he_count_crossmatches (const ivf_t * ivf, int ht, size_t * nmatches)
{
  long i;
#ifdef _OPENMP
#pragma omp parallel for private (i)
#endif
  for (i = 0 ; i < ivf->k ; i++) {
    crossmatch_hamming_count (ivf->adat[i], ivf_get_nb_elems (ivf, i),
                              ht, ivf->elem_size, nmatches+i);
  }
}
|
scanrom.c | #include "scanrom.h"
#include "floram_util.h"
/* XOR together the ROM blocks selected by bitvector into output_share
   (memblocksize bytes).  Each thread accumulates into a private aligned
   buffer; the partial sums are combined serially afterwards.
   NOTE(review): assumes rom_memory and output_share are suitably aligned
   and memblocksize is a multiple of sizeof(uint64_t) — confirm at callers. */
void scanrom_read_with_bitvector_offline(uint8_t * output_share, uint8_t * rom_memory, bool * bitvector, size_t memblocksize, size_t blockcount) {
  memset(output_share, 0, memblocksize);
  /* BUG FIX: explicit cast — assigning uint8_t* to uint64_t* without a cast
     is a constraint violation in C */
  uint64_t * rm = (uint64_t *) rom_memory;
  bool * biv = bitvector;
  uint64_t ** sums;
  size_t threadcount;
  floram_set_procs_for_data_size(memblocksize * blockcount);
  #pragma omp parallel
  {
    threadcount = omp_get_num_threads();
    /* single thread allocates the table; the implicit barrier makes it
       visible to all threads before use */
    #pragma omp single
    sums = malloc(threadcount * sizeof(uint64_t *));
    uint64_t * s;
    floram_zpma(&s, 16, memblocksize); /* zeroed, 16-byte-aligned per-thread buffer */
    sums[omp_get_thread_num()] = s;
    #pragma omp for schedule(guided)
    for (size_t ii = 0; ii < blockcount; ii++) {
      /* biv[ii] is 0 or 1, so the multiply conditionally selects block ii */
      #pragma omp simd aligned(rm,biv,s:16)
      for (size_t jj = 0; jj < memblocksize / sizeof(uint64_t); jj++) {
        s[jj] ^= biv[ii] * rm[ii * ((memblocksize) / sizeof(uint64_t)) + jj];
      }
    }
  }
  /* fold the per-thread partial sums into the output share */
  for (size_t ii = 0; ii < threadcount; ii++) {
    for (size_t jj = 0; jj < memblocksize / sizeof(uint64_t); jj++) {
      ((uint64_t *)output_share)[jj] ^= sums[ii][jj];
    }
    free(sums[ii]);
  }
  free(sums);
}
/* XOR together the ROM blocks selected by the combined bit/block vectors into
   z_and_output (memblocksize bytes), which is both an input (z) and the
   output buffer.  Per-thread partial sums are combined serially at the end.
   NOTE(review): assumes memblocksize is a multiple of sizeof(uint64_t) and
   buffers are suitably aligned — confirm at callers. */
void scanrom_read_with_blockvector_offline(uint8_t * z_and_output, uint8_t * rom_memory, bool * bitvector, uint8_t * blockvector, size_t memblocksize, size_t blockcount) {
  /* BUG FIX: explicit cast — assigning uint8_t* to uint64_t* without a cast
     is a constraint violation in C */
  uint64_t * rm = (uint64_t *) rom_memory;
  uint64_t ** sums;
  size_t threadcount;
  floram_set_procs_for_data_size(memblocksize * blockcount);
  #pragma omp parallel
  {
    threadcount = omp_get_num_threads();
    #pragma omp single
    sums = malloc(threadcount * sizeof(uint64_t *));
    uint64_t * s;
    floram_zpma(&s, 16, memblocksize); /* zeroed, 16-byte-aligned per-thread buffer */
    sums[omp_get_thread_num()] = s;
    #pragma omp for schedule(guided)
    for (size_t ii = 0; ii < blockcount; ii++) {
      /* derive the selection bit for block ii from the bit vector, the z
         block, and the block vector (all read before z_and_output is reused
         as the output below, after the parallel region) */
      bool bitvector_bit = bitvector[ii/(BLOCKSIZE * 8)];
      uint8_t z_block_part = z_and_output[(ii%(BLOCKSIZE*8))/8];
      uint8_t * blockvector_block = &blockvector[(ii/(BLOCKSIZE * 8))*BLOCKSIZE];
      uint8_t blockvector_block_part = blockvector_block[(ii%(BLOCKSIZE*8))/8];
      bool b_temp = (((bitvector_bit * z_block_part) ^ blockvector_block_part) >> (ii % 8)) & 0x1;
      #pragma omp simd aligned(rm,s:16)
      for (size_t jj = 0; jj < memblocksize / sizeof(uint64_t); jj++) {
        s[jj] ^= b_temp * rm[ii * memblocksize / sizeof(uint64_t) + jj];
      }
    }
  }
  /* all reads of z_and_output are complete (implicit barrier at the end of
     the parallel region); now reuse it as the output accumulator */
  memset(z_and_output, 0, memblocksize);
  for (size_t ii = 0; ii < threadcount; ii++) {
    for (size_t jj = 0; jj < memblocksize / sizeof(uint64_t); jj++) {
      ((uint64_t *)z_and_output)[jj] ^= sums[ii][jj];
    }
    free(sums[ii]);
  }
  free(sums);
}
#ifdef SCANROM_DISABLE_ENCRYPTION
/* Encryption disabled: pass the data straight through.  key and index are
intentionally unused in this build configuration. */
void scanrom_encrypt_offline(uint8_t * out, uint8_t * in, uint8_t* key, size_t index, size_t blockmultiple, size_t blockcount) {
if (in == NULL) {
/* no plaintext supplied: the "ciphertext" is all zeroes */
memset(out, 0, BLOCKSIZE * blockcount * blockmultiple);
} else {
memcpy(out, in, BLOCKSIZE * blockcount * blockmultiple);
}
}
#else
/* Expand a keystream from `key` starting at block index*blockmultiple into
`out`, then (if a plaintext is supplied) XOR it in, 64 bits at a time.
When in == NULL the raw keystream is returned. */
void scanrom_encrypt_offline(uint8_t * out, uint8_t * in, uint8_t* key, size_t index, size_t blockmultiple, size_t blockcount) {
offline_expand_from(out, key, index*blockmultiple, blockcount * blockmultiple);
if (in != NULL) {
floram_set_procs_for_data_size(BLOCKSIZE * blockmultiple * blockcount);
/* NOTE(review): the word-wise XOR assumes in/out are 8-byte aligned and the
total size is a multiple of sizeof(uint64_t) — confirm at call sites */
#pragma omp parallel for simd schedule(guided)
for (size_t ii = 0; ii < blockcount * blockmultiple * BLOCKSIZE / sizeof(uint64_t); ii++) {
((uint64_t *)out)[ii] ^= ((uint64_t *)in)[ii];
}
}
}
#endif
/* For every block ii, XOR the corresponding blockvector block into WROM
   memory, plus z_block when bitvector[ii] is set (the multiply by the 0/1
   bool conditionally selects z).
   NOTE(review): assumes the buffers are 16-byte aligned and memblocksize is
   a multiple of sizeof(uint64_t) — confirm at callers. */
void scanwrom_write_with_blockvector_offline(uint8_t * wrom_memory, uint8_t * blockvector, bool * bitvector, uint8_t*z_block, size_t memblocksize, size_t blockcount) {
  /* BUG FIX: explicit casts — assigning uint8_t* to uint64_t* without a cast
     is a constraint violation in C */
  uint64_t * wm = (uint64_t *) wrom_memory;
  uint64_t * blv = (uint64_t *) blockvector;
  bool * biv = bitvector;
  uint64_t * z = (uint64_t *) z_block;
  floram_set_procs_for_data_size(memblocksize * blockcount);
  #pragma omp parallel for schedule(guided)
  for (size_t ii = 0; ii < blockcount; ii++) {
    #pragma omp simd aligned(wm,blv,biv,z:16)
    for (size_t jj = 0; jj < memblocksize / sizeof(uint64_t); jj++) {
      wm[ii * memblocksize / sizeof(uint64_t) + jj] ^= blv[ii * memblocksize / sizeof(uint64_t) + jj] ^ (biv[ii] * z[jj]);
    }
  }
}
kncmbpush3.c | /* KNC C Library for Skeleton 3D Electromagnetic Vector PIC Code */
/* written by Viktor K. Decyk, UCLA and Ricardo Fonseca, ISCTE */
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <math.h>
#include <string.h>
#include <immintrin.h>
#include "kncmbpush3.h"
/*--------------------------------------------------------------------*/
void ckncgbppush3lt(float ppart[], float fxyz[], float bxyz[],
int kpic[], float qbm, float dt, float dtc,
float *ek, int idimp, int nppmx, int nx, int ny,
int nz, int mx, int my, int mz, int nxv, int nyv,
int nzv, int mx1, int my1, int mxyz1,int ipbc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
OpenMP/vector version using guard cells
data read in tiles
particles stored segmented array
190 flops/particle, 1 divide, 54 loads, 6 stores
input: all, output: ppart, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fx(x(t),y(t),z(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fy(x(t),y(t),z(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fz(x(t),y(t),z(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and
omz = (q/m)*bz(x(t),y(t),z(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
z(t+dt)=z(t) + vz(t+dt/2)*dt
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)),
bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l)
bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l)
bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive force calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
.25*(vz(t+dt/2) + vz(t-dt/2))**2)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, mxv, myv, mxyv, nxyv;
float qtmh, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, z, vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_nm, v_it, v_perm;
__m512 v_qtmh, v_dt, v_dtc, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s;
__m512 v_two, v_half, v_ox, v_oy, v_oz;
__m512d v_sum1, v_d;
__mmask16 msk;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
__attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
/* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtmh = 0.5f*qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qtmh = _mm512_set1_ps(qtmh);
v_dt = _mm512_set1_ps(dt);
v_dtc = _mm512_set1_ps(dtc);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_two = _mm512_set1_ps(2.0f);
v_half = _mm512_set1_ps(0.5f);
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
v_sum1 = _mm512_set1_pd(0.0);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,x,y,z,vx, \
vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt, \
omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1, \
v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp, \
v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy, \
v_oz,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,p,q,r,s,msk,kk,dd,sfxyz,sbxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ll = (mz < nz-loff ? mz : nz-loff) + 1;
nps = 4*(nn/4);
/* load electric field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* load magnetic field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sbxyz[4*(i+mxv*j+mxyv*k)] */
/* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sbxyz[m],v_at);
_mm512_packstorehi_ps(&sbxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sbxyz[4*(i+mxv*j+mxyv*k)]
= bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[1+4*(i+mxv*j+mxyv*k)]
= bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[2+4*(i+mxv*j+mxyv*k)]
= bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[3+4*(i+mxv*j+mxyv*k)]
= bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* find electric field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of electric field */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of electric field */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of electric field */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of electric field */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* find magnetic field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of magnetic field */
/* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_ox = _mm512_mul_ps(v_amx,a);
v_ox = _mm512_fmadd_ps(v_amy,p,v_ox);
/* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_oy = _mm512_mul_ps(v_amx,b);
v_oy = _mm512_fmadd_ps(v_amy,q,v_oy);
/* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_oz = _mm512_mul_ps(v_amx,c);
v_oz = _mm512_fmadd_ps(v_amy,r,v_oz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of magnetic field */
/* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox);
v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox);
v_ox = _mm512_mul_ps(v_amz,v_ox);
/* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy);
v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy);
v_oy = _mm512_mul_ps(v_amz,v_oy);
/* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz);
v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz);
v_oz = _mm512_mul_ps(v_amz,v_oz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of magnetic field */
/* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of magnetic field */
/* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox);
/* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy);
/* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz);
/* calculate half impulse */
/* dx *= qtmh; */
/* dy *= qtmh; */
/* dz *= qtmh; */
v_dx = _mm512_mul_ps(v_dx,v_qtmh);
v_dy = _mm512_mul_ps(v_dy,v_qtmh);
v_dz = _mm512_mul_ps(v_dz,v_qtmh);
/* half acceleration */
/* acx = ppart[j+3*nppmx+npoff] + dx; */
/* acy = ppart[j+4*nppmx+npoff] + dy; */
/* acz = ppart[j+5*nppmx+npoff] + dz; */
a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff]));
b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff]));
c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff]));
/* time-centered kinetic energy */
/* sum1 += (acx*acx + acy*acy + acz*acz); */
v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a));
v_at = _mm512_fmadd_ps(c,c,v_at);
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* calculate cyclotron frequency */
/* omxt = qtmh*ox; */
/* omyt = qtmh*oy; */
/* omzt = qtmh*oz; */
e = _mm512_mul_ps(v_qtmh,v_ox);
f = _mm512_mul_ps(v_qtmh,v_oy);
g = _mm512_mul_ps(v_qtmh,v_oz);
/* calculate rotation matrix */
/* vx = omxt*omxt; */
v_vx = _mm512_mul_ps(e,e);
/* vy = omyt*omyt; */
v_vy = _mm512_mul_ps(f,f);
/* vz = omzt*omzt; */
v_vz = _mm512_mul_ps(g,g);
/* omt = omxt*omxt + omyt*omyt + omzt*omzt; */
v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz);
/* anorm = 2.0f/(1.0f + omt); */
d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at));
/* omt = 0.5f*(1.0f - omt); */
h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at));
/* vx = (omt + vx)*acx; */
v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a);
/* vy = (omt + vy)*acy; */
v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b);
/* vz = (omt + vz)*acz; */
v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c);
/* omt = omxt*omyt; */
h = _mm512_mul_ps(e,f);
/* vx = vx + (omzt + omt)*acy; */
v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx);
/* vy = vy + (omt - omzt)*acx; */
v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy);
/* omt = omxt*omzt; */
h = _mm512_mul_ps(e,g);
/* vx = vx + (omt - omyt)*acz; */
v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx);
/* vz = vz + (omt + omyt)*acx; */
v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz);
/* omt = omyt*omzt; */
h = _mm512_mul_ps(f,g);
/* vy = vy + (omt + omxt)*acz; */
v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy);
/* vz = vz + (omt - omxt)*acy; */
v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz);
/* new velocity */
/* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */
/* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */
/* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */
v_vx = _mm512_fmadd_ps(v_vx,d,v_dx);
v_vy = _mm512_fmadd_ps(v_vy,d,v_dy);
v_vz = _mm512_fmadd_ps(v_vz,d,v_dz);
/* new position */
/* dx = x + vx*dtc; */
/* dy = y + vy*dtc; */
/* dz = z + vz*dtc; */
v_dx = _mm512_fmadd_ps(v_vx,v_dtc,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dtc,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dtc,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* vz = -vz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new velocity */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find electric field */
nn = nm;
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
/* find magnetic field */
nn = nm;
ox = amx*sbxyz[nn] + amy*sbxyz[nn+4];
oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sbxyz[nn] + amy*sbxyz[nn+4];
acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+3*nppmx+npoff] + dx;
acy = ppart[j+4*nppmx+npoff] + dy;
acz = ppart[j+5*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* new position */
dx = x + vx*dtc;
dy = y + vy*dtc;
dz = z + vz*dtc;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
vz = -vz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new velocity */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
}
/* sum2 += sum1; */
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
}
/* normalize kinetic energy */
*ek += 0.5f*sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgbppushf3lt(float ppart[], float fxyz[], float bxyz[],
int kpic[], int ncl[], int ihole[], float qbm,
float dt, float dtc, float *ek, int idimp,
int nppmx, int nx, int ny, int nz, int mx, int my,
int mz, int nxv, int nyv, int nzv, int mx1,
int my1, int mxyz1, int ntmax, int *irc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
also determines list of particles which are leaving this tile
OpenMP/vector version using guard cells
data read in tiles
particles stored segmented array
190 flops/particle, 1 divide, 54 loads, 6 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fx(x(t),y(t),z(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fy(x(t),y(t),z(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fz(x(t),y(t),z(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and
omz = (q/m)*bz(x(t),y(t),z(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
z(t+dt)=z(t) + vz(t+dt/2)*dt
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)),
bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l)
bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l)
bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l)
that is, the convolution of magnetic field over particle shape
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive force calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 +
     (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 +
     (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, ii, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float qtmh, dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, z, vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_nm, v_it, v_0, v_1, v_3, v_9, v_perm;
__m512 v_qtmh, v_dt, v_dtc, v_one, v_zero, v_anx, v_any, v_anz;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s;
__m512 v_two, v_half, v_ox, v_oy, v_oz;
__m512d v_sum1, v_d;
__mmask16 msk1, msk2;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
__attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
/* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtmh = 0.5f*qbm*dt;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
sum2 = 0.0;
/* set boundary values */
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qtmh = _mm512_set1_ps(qtmh);
v_dt = _mm512_set1_ps(dt);
v_dtc = _mm512_set1_ps(dtc);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_two = _mm512_set1_ps(2.0f);
v_half = _mm512_set1_ps(0.5f);
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
v_sum1 = _mm512_set1_pd(0.0);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,ih,nh,x, \
y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz, \
omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9, \
edgelx,edgely,edgelz,edgerx,edgery,edgerz,sum1,v_noff,v_moff,v_loff, \
v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy, \
v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy,v_oz,v_at,v_edgelx, \
v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_d,v_sum1,a,b,c,d,e,f,g, \
h,p,q,r,s,msk1,msk2,kk,dd,sfxyz,sbxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
ih = 0;
nh = 0;
nn += 1;
mm += 1;
ll += 1;
/* load local fields from global array */
nps = 4*(nn/4);
/* load electric field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* load magnetic field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sbxyz[4*(i+mxv*j+mxyv*k)] */
/* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sbxyz[m],v_at);
_mm512_packstorehi_ps(&sbxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sbxyz[4*(i+mxv*j+mxyv*k)]
= bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[1+4*(i+mxv*j+mxyv*k)]
= bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[2+4*(i+mxv*j+mxyv*k)]
= bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[3+4*(i+mxv*j+mxyv*k)]
= bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* find electric field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of electric field */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of electric field */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of electric field */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of electric field */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* find magnetic field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of magnetic field */
/* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_ox = _mm512_mul_ps(v_amx,a);
v_ox = _mm512_fmadd_ps(v_amy,p,v_ox);
/* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_oy = _mm512_mul_ps(v_amx,b);
v_oy = _mm512_fmadd_ps(v_amy,q,v_oy);
/* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_oz = _mm512_mul_ps(v_amx,c);
v_oz = _mm512_fmadd_ps(v_amy,r,v_oz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of magnetic field */
/* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox);
v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox);
v_ox = _mm512_mul_ps(v_amz,v_ox);
/* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy);
v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy);
v_oy = _mm512_mul_ps(v_amz,v_oy);
/* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz);
v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz);
v_oz = _mm512_mul_ps(v_amz,v_oz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of magnetic field */
/* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of magnetic field */
/* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox);
/* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy);
/* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz);
/* calculate half impulse */
/* dx *= qtmh; */
/* dy *= qtmh; */
/* dz *= qtmh; */
v_dx = _mm512_mul_ps(v_dx,v_qtmh);
v_dy = _mm512_mul_ps(v_dy,v_qtmh);
v_dz = _mm512_mul_ps(v_dz,v_qtmh);
/* half acceleration */
/* acx = ppart[j+3*nppmx+npoff] + dx; */
/* acy = ppart[j+4*nppmx+npoff] + dy; */
/* acz = ppart[j+5*nppmx+npoff] + dz; */
a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff]));
b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff]));
c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff]));
/* time-centered kinetic energy */
/* sum1 += (acx*acx + acy*acy + acz*acz); */
v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a));
v_at = _mm512_fmadd_ps(c,c,v_at);
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* calculate cyclotron frequency */
/* omxt = qtmh*ox; */
/* omyt = qtmh*oy; */
/* omzt = qtmh*oz; */
e = _mm512_mul_ps(v_qtmh,v_ox);
f = _mm512_mul_ps(v_qtmh,v_oy);
g = _mm512_mul_ps(v_qtmh,v_oz);
/* calculate rotation matrix */
/* vx = omxt*omxt; */
v_vx = _mm512_mul_ps(e,e);
/* vy = omyt*omyt; */
v_vy = _mm512_mul_ps(f,f);
/* vz = omzt*omzt; */
v_vz = _mm512_mul_ps(g,g);
/* omt = omxt*omxt + omyt*omyt + omzt*omzt; */
v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz);
/* anorm = 2.0f/(1.0f + omt); */
d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at));
/* omt = 0.5f*(1.0f - omt); */
h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at));
/* vx = (omt + vx)*acx; */
v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a);
/* vy = (omt + vy)*acy; */
v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b);
/* vz = (omt + vz)*acz; */
v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c);
/* omt = omxt*omyt; */
h = _mm512_mul_ps(e,f);
/* vx = vx + (omzt + omt)*acy; */
v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx);
/* vy = vy + (omt - omzt)*acx; */
v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy);
/* omt = omxt*omzt; */
h = _mm512_mul_ps(e,g);
/* vx = vx + (omt - omyt)*acz; */
v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx);
/* vz = vz + (omt + omyt)*acx; */
v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz);
/* omt = omyt*omzt; */
h = _mm512_mul_ps(f,g);
/* vy = vy + (omt + omxt)*acz; */
v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy);
/* vz = vz + (omt - omxt)*acy; */
v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz);
/* new velocity */
/* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */
/* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */
/* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */
v_vx = _mm512_fmadd_ps(v_vx,d,v_dx);
v_vy = _mm512_fmadd_ps(v_vy,d,v_dy);
v_vz = _mm512_fmadd_ps(v_vz,d,v_dz);
/* new position */
/* dx = x + vx*dtc; */
/* dy = y + vy*dtc; */
/* dz = z + vz*dtc; */
v_dx = _mm512_fmadd_ps(v_vx,v_dtc,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dtc,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dtc,v_z);
/* find particles going out of bounds */
/* mm = 0; */
v_mm = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* mm = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dx = v_x;
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* mm = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* mm = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dx = v_x;
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* mm += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dy = v_x;
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* mm += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* mm += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dy = v_x;
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* mm += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dz = v_x;
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* mm += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* mm += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dz = v_x;
}
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new velocity */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
/* increment counters */
/* if (mm > 0) { */
/* ncl[mm+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
_mm512_store_epi32(kk,v_mm);
for (i = 0; i < 16; i++) {
mm = kk[i];
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + i + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find electric field */
nn = nm;
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
/* find magnetic field */
nn = nm;
ox = amx*sbxyz[nn] + amy*sbxyz[nn+4];
oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sbxyz[nn] + amy*sbxyz[nn+4];
acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+3*nppmx+npoff] + dx;
acy = ppart[j+4*nppmx+npoff] + dy;
acz = ppart[j+5*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* new position */
dx = x + vx*dtc;
dy = y + vy*dtc;
dz = z + vz*dtc;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx = dx - anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy = dy - any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
dz = dz - anz;
mm += 18;
}
else if (dz < edgelz) {
if (dz < 0.0f) {
dz += anz;
if (dz < anz)
mm += 9;
else
dz = 0.0f;
}
else {
mm += 9;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new velocity */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
/* increment counters */
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
/* sum2 += sum1; */
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
/* normalize kinetic energy */
*ek += 0.5f*sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgrbppush3lt(float ppart[], float fxyz[], float bxyz[],
int kpic[], float qbm, float dt, float dtc,
float ci, float *ek, int idimp, int nppmx, int nx,
int ny, int nz, int mx, int my, int mz, int nxv,
int nyv, int nzv, int mx1, int my1, int mxyz1,
int ipbc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field
Using the Boris Mover.
OpenMP/vector version using guard cells
data read in tiles
particles stored segmented array
202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores
input: all, output: ppart, ek
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fx(x(t),y(t),z(t))*dt
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fy(x(t),y(t),z(t))*dt
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fz(x(t),y(t),z(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t),z(t))*gami,
omy = (q/m)*by(x(t),y(t),z(t))*gami,
omz = (q/m)*bz(x(t),y(t),z(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
z(t+dt) = z(t) + pz(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)),
bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = momentum px of particle n in tile m
ppart[m][4][n] = momentum py of particle n in tile m
ppart[m][5][n] = momentum pz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l)
bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l)
bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive force calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, mxv, myv, mxyv, nxyv;
float qtmh, ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1;
float acx, acy, acz, omxt, p2, gami, qtmg, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg;
float x, y, z, vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_nm, v_it, v_perm;
__m512 v_qtmh, v_ci2, v_dt, v_dtc, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s;
__m512 v_two, v_half, v_ox, v_oy, v_oz;
__m512d v_sum1, v_d;
__mmask16 msk;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
__attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
/* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qtmh = _mm512_set1_ps(qtmh);
v_ci2 = _mm512_set1_ps(ci2);
v_dt = _mm512_set1_ps(dt);
v_dtc = _mm512_set1_ps(dtc);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_two = _mm512_set1_ps(2.0f);
v_half = _mm512_set1_ps(0.5f);
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
v_sum1 = _mm512_set1_pd(0.0);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,x,y,z,vx, \
vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt, \
omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2, \
gami,qtmg,dtg,sum1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x, \
v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx, \
v_vy,v_vz,v_ox,v_oy,v_oz,v_gami,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,p,q,r, \
s,msk,kk,dd,sfxyz,sbxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ll = (mz < nz-loff ? mz : nz-loff) + 1;
nps = 4*(nn/4);
/* load electric field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* load magnetic field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sbxyz[4*(i+mxv*j+mxyv*k)] */
/* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sbxyz[m],v_at);
_mm512_packstorehi_ps(&sbxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sbxyz[4*(i+mxv*j+mxyv*k)]
= bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[1+4*(i+mxv*j+mxyv*k)]
= bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[2+4*(i+mxv*j+mxyv*k)]
= bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[3+4*(i+mxv*j+mxyv*k)]
= bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* find electric field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of electric field */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of electric field */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of electric field */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of electric field */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* find magnetic field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of magnetic field */
/* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_ox = _mm512_mul_ps(v_amx,a);
v_ox = _mm512_fmadd_ps(v_amy,p,v_ox);
/* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_oy = _mm512_mul_ps(v_amx,b);
v_oy = _mm512_fmadd_ps(v_amy,q,v_oy);
/* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_oz = _mm512_mul_ps(v_amx,c);
v_oz = _mm512_fmadd_ps(v_amy,r,v_oz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of magnetic field */
/* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox);
v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox);
v_ox = _mm512_mul_ps(v_amz,v_ox);
/* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy);
v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy);
v_oy = _mm512_mul_ps(v_amz,v_oy);
/* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz);
v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz);
v_oz = _mm512_mul_ps(v_amz,v_oz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of magnetic field */
/* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of magnetic field */
/* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox);
/* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy);
/* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz);
/* calculate half impulse */
/* dx *= qtmh; */
/* dy *= qtmh; */
/* dz *= qtmh; */
v_dx = _mm512_mul_ps(v_dx,v_qtmh);
v_dy = _mm512_mul_ps(v_dy,v_qtmh);
v_dz = _mm512_mul_ps(v_dz,v_qtmh);
/* half acceleration */
/* acx = ppart[j+3*nppmx+npoff] + dx; */
/* acy = ppart[j+4*nppmx+npoff] + dy; */
/* acz = ppart[j+5*nppmx+npoff] + dz; */
a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff]));
b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff]));
c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff]));
/* find inverse gamma */
/* p2 = acx*acx + acy*acy + acz*acz; */
v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a));
v_at = _mm512_fmadd_ps(c,c,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* time-centered kinetic energy */
/* sum1 += gami*p2/(1.0f + gami); */
v_at = _mm512_mul_ps(v_gami,v_at);
v_at = _mm512_div_ps(v_at,_mm512_add_ps(v_one,v_gami));
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* renormalize magnetic field */
/* qtmg = qtmh*gami; */
v_at = _mm512_mul_ps(v_qtmh,v_gami);
/* calculate cyclotron frequency */
/* omxt = qtmg*ox; */
/* omyt = qtmg*oy; */
/* omzt = qtmg*oz; */
e = _mm512_mul_ps(v_at,v_ox);
f = _mm512_mul_ps(v_at,v_oy);
g = _mm512_mul_ps(v_at,v_oz);
/* calculate rotation matrix */
/* vx = omxt*omxt; */
v_vx = _mm512_mul_ps(e,e);
/* vy = omyt*omyt; */
v_vy = _mm512_mul_ps(f,f);
/* vz = omzt*omzt; */
v_vz = _mm512_mul_ps(g,g);
/* omt = omxt*omxt + omyt*omyt + omzt*omzt; */
v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz);
/* anorm = 2.0f/(1.0f + omt); */
d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at));
/* omt = 0.5f*(1.0f - omt); */
h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at));
/* vx = (omt + vx)*acx; */
v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a);
/* vy = (omt + vy)*acy; */
v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b);
/* vz = (omt + vz)*acz; */
v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c);
/* omt = omxt*omyt; */
h = _mm512_mul_ps(e,f);
/* vx = vx + (omzt + omt)*acy; */
v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx);
/* vy = vy + (omt - omzt)*acx; */
v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy);
/* omt = omxt*omzt; */
h = _mm512_mul_ps(e,g);
/* vx = vx + (omt - omyt)*acz; */
v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx);
/* vz = vz + (omt + omyt)*acx; */
v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz);
/* omt = omyt*omzt; */
h = _mm512_mul_ps(f,g);
/* vy = vy + (omt + omxt)*acz; */
v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy);
/* vz = vz + (omt - omxt)*acy; */
v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz);
/* new momentum */
/* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */
/* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */
/* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */
v_vx = _mm512_fmadd_ps(v_vx,d,v_dx);
v_vy = _mm512_fmadd_ps(v_vy,d,v_dy);
v_vz = _mm512_fmadd_ps(v_vz,d,v_dz);
/* update inverse gamma */
/* p2 = vx*vx + vy*vy + vz*vz; */
v_at = _mm512_fmadd_ps(v_vy,v_vy,_mm512_mul_ps(v_vx,v_vx));
v_at = _mm512_fmadd_ps(v_vz,v_vz,v_at);
/* dtg = dtc/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_at = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* v_at = _mm512_mul_ps(v_dtc,v_at); */
/* full accuracy calculation */
v_at = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_at = _mm512_div_ps(v_dtc,v_at);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* v_at = _mm512_div_ps(v_dtc,v_at); */
/* new position */
/* dx = x + vx*dtg; */
/* dy = y + vy*dtg; */
/* dz = z + vz*dtg; */
v_dx = _mm512_fmadd_ps(v_vx,v_at,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_at,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_at,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* vz = -vz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new momentum */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find electric field */
nn = nm;
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
/* find magnetic field */
nn = nm;
ox = amx*sbxyz[nn] + amy*sbxyz[nn+4];
oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sbxyz[nn] + amy*sbxyz[nn+4];
acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+3*nppmx+npoff] + dx;
acy = ppart[j+4*nppmx+npoff] + dy;
acz = ppart[j+5*nppmx+npoff] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new momentum */
vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* update inverse gamma */
p2 = vx*vx + vy*vy + vz*vz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + vx*dtg;
dy = y + vy*dtg;
dz = z + vz*dtg;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
vz = -vz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new momentum */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
}
/* sum2 += sum1; */
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
}
/* normalize kinetic energy */
*ek += sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgrbppushf3lt(float ppart[], float fxyz[], float bxyz[],
int kpic[], int ncl[], int ihole[], float qbm,
float dt, float dtc, float ci, float *ek,
int idimp, int nppmx, int nx, int ny, int nz,
int mx, int my, int mz, int nxv, int nyv, int nzv,
int mx1, int my1, int mxyz1, int ntmax,
int *irc) {
/* for 3d code, this subroutine updates particle co-ordinates and
momenta using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field
Using the Boris Mover.
also determines list of particles which are leaving this tile
OpenMP/vector version using guard cells
data read in tiles
particles stored segmented array
202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fx(x(t),y(t),z(t))*dt
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fy(x(t),y(t),z(t))*dt
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) +
.5*(q/m)*fz(x(t),y(t),z(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t),z(t))*gami,
omy = (q/m)*by(x(t),y(t),z(t))*gami,
omz = (q/m)*bz(x(t),y(t),z(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
z(t+dt) = z(t) + pz(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)),
bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = momentum px of particle n in tile m
ppart[m][4][n] = momentum py of particle n in tile m
ppart[m][5][n] = momentum pz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l)
bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l)
bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l)
that is, the convolution of magnetic field over particle shape
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive force calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, ii, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1;
float acx, acy, acz, omxt, p2, gami, qtmg, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg;
float qtmh, ci2, x, y, z, vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_nm, v_it, v_0, v_1, v_3, v_9, v_perm;
__m512 v_dt, v_dtc, v_one, v_zero, v_anx, v_any, v_anz;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s;
__m512 v_qtmh, v_ci2, v_two, v_half, v_ox, v_oy, v_oz;
__m512d v_sum1, v_d;
__mmask16 msk1, msk2;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
__attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
/* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
sum2 = 0.0;
/* set boundary values */
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qtmh = _mm512_set1_ps(qtmh);
v_ci2 = _mm512_set1_ps(ci2);
v_dt = _mm512_set1_ps(dt);
v_dtc = _mm512_set1_ps(dtc);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_two = _mm512_set1_ps(2.0f);
v_half = _mm512_set1_ps(0.5f);
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
v_sum1 = _mm512_set1_pd(0.0);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,ih,nh,x, \
y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz, \
omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9, \
edgelx,edgely,edgelz,edgerx,edgery,edgerz,p2,gami,qtmg,dtg,sum1,v_noff, \
v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp, \
v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy,v_oz, \
v_gami,v_at,v_edgelx,v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_d, \
v_sum1,a,b,c,d,e,f,g,h,p,q,r,s,msk1,msk2,kk,dd,sfxyz,sbxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
ih = 0;
nh = 0;
nn += 1;
mm += 1;
ll += 1;
/* load local fields from global array */
nps = 4*(nn/4);
/* load electric field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* load magnetic field */
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sbxyz[4*(i+mxv*j+mxyv*k)] */
/* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sbxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sbxyz[m],v_at);
_mm512_packstorehi_ps(&sbxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sbxyz[4*(i+mxv*j+mxyv*k)]
= bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[1+4*(i+mxv*j+mxyv*k)]
= bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[2+4*(i+mxv*j+mxyv*k)]
= bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sbxyz[3+4*(i+mxv*j+mxyv*k)]
= bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* find electric field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of electric field */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of electric field */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of electric field */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of electric field */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* find magnetic field */
/* nn = nm; */
_mm512_store_epi32(kk,v_nm);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of magnetic field */
/* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_ox = _mm512_mul_ps(v_amx,a);
v_ox = _mm512_fmadd_ps(v_amy,p,v_ox);
/* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_oy = _mm512_mul_ps(v_amx,b);
v_oy = _mm512_fmadd_ps(v_amy,q,v_oy);
/* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_oz = _mm512_mul_ps(v_amx,c);
v_oz = _mm512_fmadd_ps(v_amy,r,v_oz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of magnetic field */
/* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox);
v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox);
v_ox = _mm512_mul_ps(v_amz,v_ox);
/* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy);
v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy);
v_oy = _mm512_mul_ps(v_amz,v_oy);
/* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz);
v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz);
v_oz = _mm512_mul_ps(v_amz,v_oz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nm,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of magnetic field */
/* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sbxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sbxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sbxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sbxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sbxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sbxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of magnetic field */
/* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox);
/* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy);
/* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz);
/* calculate half impulse */
/* dx *= qtmh; */
/* dy *= qtmh; */
/* dz *= qtmh; */
v_dx = _mm512_mul_ps(v_dx,v_qtmh);
v_dy = _mm512_mul_ps(v_dy,v_qtmh);
v_dz = _mm512_mul_ps(v_dz,v_qtmh);
/* half acceleration */
/* acx = ppart[j+3*nppmx+npoff] + dx; */
/* acy = ppart[j+4*nppmx+npoff] + dy; */
/* acz = ppart[j+5*nppmx+npoff] + dz; */
a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff]));
b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff]));
c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff]));
/* find inverse gamma */
/* p2 = acx*acx + acy*acy + acz*acz; */
v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a));
v_at = _mm512_fmadd_ps(c,c,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* time-centered kinetic energy */
/* sum1 += gami*p2/(1.0f + gami); */
v_at = _mm512_mul_ps(v_gami,v_at);
v_at = _mm512_div_ps(v_at,_mm512_add_ps(v_one,v_gami));
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* renormalize magnetic field */
/* qtmg = qtmh*gami; */
v_at = _mm512_mul_ps(v_qtmh,v_gami);
/* calculate cyclotron frequency */
/* omxt = qtmg*ox; */
/* omyt = qtmg*oy; */
/* omzt = qtmg*oz; */
e = _mm512_mul_ps(v_at,v_ox);
f = _mm512_mul_ps(v_at,v_oy);
g = _mm512_mul_ps(v_at,v_oz);
/* calculate rotation matrix */
/* vx = omxt*omxt; */
v_vx = _mm512_mul_ps(e,e);
/* vy = omyt*omyt; */
v_vy = _mm512_mul_ps(f,f);
/* vz = omzt*omzt; */
v_vz = _mm512_mul_ps(g,g);
/* omt = omxt*omxt + omyt*omyt + omzt*omzt; */
v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz);
/* anorm = 2.0f/(1.0f + omt); */
d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at));
/* omt = 0.5f*(1.0f - omt); */
h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at));
/* vx = (omt + vx)*acx; */
v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a);
/* vy = (omt + vy)*acy; */
v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b);
/* vz = (omt + vz)*acz; */
v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c);
/* omt = omxt*omyt; */
h = _mm512_mul_ps(e,f);
/* vx = vx + (omzt + omt)*acy; */
v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx);
/* vy = vy + (omt - omzt)*acx; */
v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy);
/* omt = omxt*omzt; */
h = _mm512_mul_ps(e,g);
/* vx = vx + (omt - omyt)*acz; */
v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx);
/* vz = vz + (omt + omyt)*acx; */
v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz);
/* omt = omyt*omzt; */
h = _mm512_mul_ps(f,g);
/* vy = vy + (omt + omxt)*acz; */
v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy);
/* vz = vz + (omt - omxt)*acy; */
v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz);
/* new momentum */
/* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */
/* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */
/* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */
v_vx = _mm512_fmadd_ps(v_vx,d,v_dx);
v_vy = _mm512_fmadd_ps(v_vy,d,v_dy);
v_vz = _mm512_fmadd_ps(v_vz,d,v_dz);
/* update inverse gamma */
/* p2 = vx*vx + vy*vy + vz*vz; */
v_at = _mm512_fmadd_ps(v_vy,v_vy,_mm512_mul_ps(v_vx,v_vx));
v_at = _mm512_fmadd_ps(v_vz,v_vz,v_at);
/* dtg = dtc/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_at = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* v_at = _mm512_mul_ps(v_dtc,v_at); */
/* full accuracy calculation */
v_at = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_at = _mm512_div_ps(v_dtc,v_at);
/* full accuracy calculation with SVML */
/* v_at = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* v_at = _mm512_mul_ps(v_dtc,v_at); */
/* new position */
/* dx = x + vx*dtg; */
/* dy = y + vy*dtg; */
/* dz = z + vz*dtg; */
v_dx = _mm512_fmadd_ps(v_vx,v_at,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_at,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_at,v_z);
/* find particles going out of bounds */
/* mm = 0; */
v_mm = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* mm = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dx = v_x;
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* mm = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* mm = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dx = v_x;
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* mm += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dy = v_x;
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* mm += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* mm += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dy = v_x;
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* mm += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dz = v_x;
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* mm += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* mm += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dz = v_x;
}
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new momentum */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
/* increment counters */
/* if (mm > 0) { */
/* ncl[mm+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
_mm512_store_epi32(kk,v_mm);
for (i = 0; i < 16; i++) {
mm = kk[i];
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + i + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find electric field */
nn = nm;
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
mm = nn + 4*mxv;
dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]);
dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]);
dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]);
/* find magnetic field */
nn = nm;
ox = amx*sbxyz[nn] + amy*sbxyz[nn+4];
oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
nn += 4*mxyv;
acx = amx*sbxyz[nn] + amy*sbxyz[nn+4];
acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4];
acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4];
mm = nn + 4*mxv;
ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]);
oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]);
oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+3*nppmx+npoff] + dx;
acy = ppart[j+4*nppmx+npoff] + dy;
acz = ppart[j+5*nppmx+npoff] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new momentum */
vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm;
vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm;
vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm;
/* update inverse gamma */
p2 = vx*vx + vy*vy + vz*vz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + vx*dtg;
dy = y + vy*dtg;
dz = z + vz*dtg;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx = dx - anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy = dy - any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
dz = dz - anz;
mm += 18;
}
else if (dz < edgelz) {
if (dz < 0.0f) {
dz += anz;
if (dz < anz)
mm += 9;
else
dz = 0.0f;
}
else {
mm += 9;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new momentum */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
/* increment counters */
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
/* sum2 += sum1; */
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
/* normalize kinetic energy */
*ek += sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgppost3lt(float ppart[], float q[], int kpic[], float qm,
int nppmx, int idimp, int mx, int my, int mz,
int nxv, int nyv, int nzv, int mx1, int my1,
int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
OpenMP/vector version using guard cells
data deposited in tiles
particles stored segmented array
33 flops/particle, 11 loads, 8 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
q(n+1,m+1,l+1)=qm*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
q[l][k][j] = charge density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
nzv = third dimension of charge array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
__m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
__m512i v_nn, v_mm, v_ll, v_it;
__m512 v_qm, v_one;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_as, v_at;
/* qp/qr are only ever read and written through masked loadunpack/packstore
pairs below, so their initial indeterminate contents never reach memory */
__m512 a, b, c, d, e, f, g, h, qp, qr;
__mmask16 msk, msks, v_m;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx + 1;
myv = my + 1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
v_mxv = _mm512_set1_epi32(mxv);
v_mxyv = _mm512_set1_epi32(mxyv);
v_qm = _mm512_set1_ps(qm);
v_one = _mm512_set1_ps(1.0f);
/* build v_m = lanes 1..15 true, lane 0 false (1.0 < 1.0 fails), used to
skip the i = 0 element in the interior merge loop below */
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,a,b,c, \
d,e,f,g,h,qp,qr,msk,msks,kk,sq)
for (l = 0; l < mxyz1; l++) {
/* decode linear tile index l into grid offsets (noff,moff,loff) of the
tile origin */
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/* sq[j] = 0.0f; */
/* } */
memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a..d are the four weights on the lower z-plane, e..h on the upper */
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
_mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz; */
/* y = sq[nn+1] + amy*amz; */
/* z = sq[nn+mxv] + dyp*amz; */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x; */
/* sq[nn+1] = y; */
/* sq[nn+mxv] = z; */
/* sq[nn+1+mxv] = w; */
/* mm = nn + mxyv; */
/* x = sq[mm] + amx*dzp; */
/* y = sq[mm+1] + amy*dzp; */
/* z = sq[mm+mxv] + dyp*dzp; */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x; */
/* sq[mm+1] = y; */
/* sq[mm+mxv] = z; */
/* sq[mm+1+mxv] = w; */
/* deposit charge for two particles at a time */
/* each iteration handles vector lanes 2*i (first particle) and 2*i+1
(second particle): msk covers both lanes, while the narrower msks
merges in the partner weight moved over by shuffle control 177
(0xB1, which swaps adjacent elements), so the two adjacent grid
values (e.g. sq[nn] and sq[nn+1]) are updated with one masked add */
for (i = 0; i < 8; i++) {
/* first particle */
mm = kk[2*i];
msk = _mm512_int2mask(3<<(2*i));
msks = _mm512_int2mask(2<<(2*i));
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)a,msks,
(__m512i)b,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)c,msks,
(__m512i)d,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
mm = mm + mxyv;
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)e,msks,
(__m512i)f,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)g,msks,
(__m512i)h,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
/* second particle: msk (both lanes) is reused, msks now selects lane
2*i and the shuffle operands are swapped relative to the first
particle */
mm = kk[2*i+1];
msks = _mm512_int2mask(1<<(2*i));
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)b,msks,
(__m512i)a,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)d,msks,
(__m512i)c,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
mm = mm + mxyv;
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)f,msks,
(__m512i)e,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)h,msks,
(__m512i)g,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
amx = qm - dxp;
amy = 1.0f - dyp;
amz = 1.0f - dzp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amy = dxp*amy;
/* deposit charge */
x = sq[nn] + amx*amz;
y = sq[nn+1] + amy*amz;
z = sq[nn+mxv] + dyp*amz;
w = sq[nn+1+mxv] + dx1*amz;
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + amx*dzp;
y = sq[mm+1] + amy*dzp;
z = sq[mm+mxv] + dyp*dzp;
w = sq[mm+1+mxv] + dx1*dzp;
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
/* deposit charge to interior points in global array */
/* nn/mm/ll = extent of this tile's interior, clipped to the global
array dimensions */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 16*(nn/16);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 16 */
/* for (i = 1; i < nn; i++) { */
/* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */
/* += sq[i+mxv*j+mxyv*k]; */
/* } */
for (i = 0; i < nps; i+=16) {
m = i + mxv*j + mxyv*k;
v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for first element for i = 0 */
/* (element 0 is an edge point, deposited atomically below) */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&q[m],v_at);
_mm512_packstorehi_ps(&q[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m ; i < nn; i++) {
q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[i+mxv*j+mxyv*k];
}
}
}
/* deposit charge to edge points in global array */
/* atomic adds: edge/guard cells can be targeted concurrently by
neighboring tiles running on other threads */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
if (lm > mz) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[i+mxv*j+mxyv*(lm-1)];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
+= sq[i+mxv*(mm-1)+mxyv*k];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[nm-1+mxv*j+mxyv*k];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
+= sq[i+mxv*(mm-1)+mxyv*(lm-1)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[mxv*j+mxyv*(lm-1)];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[nm-1+mxv*j+mxyv*(lm-1)];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void cknc2gppost3lt(float ppart[], float q[], int kpic[], float qm,
int nppmx, int idimp, int mx, int my, int mz,
int nxv, int nyv, int nzv, int mx1, int my1,
int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
OpenMP/vector version using guard cells
data deposited in tiles
particles stored segmented array
33 flops/particle, 11 loads, 8 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
q(n+1,m+1,l+1)=qm*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
q[l][k][j] = charge density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
nzv = third dimension of charge array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
__m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
__m512i v_nn, v_mm, v_ll, v_it;
__m512 v_qm, v_one;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_as, v_at;
__mmask16 v_m;
__attribute__((aligned(64))) unsigned int kk[16];
/* union gives per-lane scalar access to the vector weight registers */
typedef union vfloat {float v[16]; __m512 v16;} vf;
__attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
vf vv[8];
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx + 1;
myv = my + 1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
v_mxv = _mm512_set1_epi32(mxv);
v_mxyv = _mm512_set1_epi32(mxyv);
v_qm = _mm512_set1_ps(qm);
v_one = _mm512_set1_ps(1.0f);
/* build v_m = lanes 1..15 true, lane 0 false (1.0 < 1.0 fails), used to
skip the i = 0 element in the interior merge loop below */
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,kk,sq,vv)
for (l = 0; l < mxyz1; l++) {
/* decode linear tile index l into grid offsets (noff,moff,loff) of the
tile origin */
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/* sq[j] = 0.0f; */
/* } */
memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* vector loop over particles in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* compute all 8 trilinear weights for 16 particles at once:
vv[0..3] = lower z-plane, vv[4..7] = upper z-plane */
/* x = amx*amz; */
/* y = amy*amz; */
/* z = dyp*amz; */
/* w = dx1*amz; */
vv[0].v16 = _mm512_mul_ps(v_amx,v_amz);
vv[1].v16 = _mm512_mul_ps(v_amy,v_amz);
vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz);
vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz);
vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp);
vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp);
vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp);
vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp);
_mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz; */
/* y = sq[nn+1] + amy*amz; */
/* z = sq[nn+mxv] + dyp*amz; */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x; */
/* sq[nn+1] = y; */
/* sq[nn+mxv] = z; */
/* sq[nn+1+mxv] = w; */
/* mm = nn + mxyv; */
/* x = sq[mm] + amx*dzp; */
/* y = sq[mm+1] + amy*dzp; */
/* z = sq[mm+mxv] + dyp*dzp; */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x; */
/* sq[mm+1] = y; */
/* sq[mm+mxv] = z; */
/* sq[mm+1+mxv] = w; */
/* NOTE(review): the scatter is done one lane at a time, presumably
because several particles in a block may share a cell (indices in
kk can repeat), so their deposits must accumulate sequentially */
for (i = 0; i < 16; i++) {
nn = kk[i];
x = sq[nn] + vv[0].v[i];
y = sq[nn+1] + vv[1].v[i];
z = sq[nn+mxv] + vv[2].v[i];
w = sq[nn+1+mxv] + vv[3].v[i];
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + vv[4].v[i];
y = sq[mm+1] + vv[5].v[i];
z = sq[mm+mxv] + vv[6].v[i];
w = sq[mm+1+mxv] + vv[7].v[i];
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
amx = qm - dxp;
amy = 1.0f - dyp;
amz = 1.0f - dzp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amy = dxp*amy;
/* deposit charge */
x = sq[nn] + amx*amz;
y = sq[nn+1] + amy*amz;
z = sq[nn+mxv] + dyp*amz;
w = sq[nn+1+mxv] + dx1*amz;
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + amx*dzp;
y = sq[mm+1] + amy*dzp;
z = sq[mm+mxv] + dyp*dzp;
w = sq[mm+1+mxv] + dx1*dzp;
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
/* deposit charge to interior points in global array */
/* nn/mm/ll = extent of this tile's interior, clipped to the global
array dimensions */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 16*(nn/16);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 16 */
/* for (i = 1; i < nn; i++) { */
/* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */
/* += sq[i+mxv*j+mxyv*k]; */
/* } */
for (i = 0; i < nps; i+=16) {
m = i + mxv*j + mxyv*k;
v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for first element for i = 0 */
/* (element 0 is an edge point, deposited atomically below) */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&q[m],v_at);
_mm512_packstorehi_ps(&q[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m ; i < nn; i++) {
q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[i+mxv*j+mxyv*k];
}
}
}
/* deposit charge to edge points in global array */
/* atomic adds: edge/guard cells can be targeted concurrently by
neighboring tiles running on other threads */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
if (lm > mz) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[i+mxv*j+mxyv*(lm-1)];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
+= sq[i+mxv*(mm-1)+mxyv*k];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[nm-1+mxv*j+mxyv*k];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
+= sq[i+mxv*(mm-1)+mxyv*(lm-1)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[mxv*j+mxyv*(lm-1)];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[nm-1+mxv*j+mxyv*(lm-1)];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgjppost3lt(float ppart[], float cu[], int kpic[], float qm,
float dt, int nppmx, int idimp, int nx, int ny,
int nz, int mx, int my, int mz, int nxv, int nyv,
int nzv, int mx1, int my1, int mxyz1, int ipbc) {
/* for 3d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
OpenMP/vector version using guard cells
data deposited in tiles
particles stored segmented array
69 flops/particle, 30 loads, 27 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
and qci = qm*vi, where i = x,y,z
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
cu[l][k][j][i] = ith component of current density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
nzv = fourth dimension of current array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
requires KNC, part needs to be 64 byte aligned
nppmx needs to be a multiple of 16
cu needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
float edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
float x, y, z;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it, v_perm;
__m512 v_qm, v_dt, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv;
__m512 cp, cr;
__mmask16 msk;
__attribute__((aligned(64))) unsigned int kk[16];
float scu[4*MXV*MYV*MZV];
/* float scu[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qm = _mm512_set1_ps(qm);
v_dt = _mm512_set1_ps(dt);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y,z, \
vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,v_noff,v_moff,v_loff, \
v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz, \
v_dx1,v_vx,v_vy,v_vz,v_at,a,b,c,d,e,f,g,h,p,q,r,s,t,u,v,ws,wt,wu,wv, \
cp,cr,msk,kk,scu)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/* scu[j] = 0.0f; */
/* } */
memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
/* deposit current */
/* vx = ppart[j+3*nppmx+npoff]; */
/* vy = ppart[j+4*nppmx+npoff]; */
/* vz = ppart[j+5*nppmx+npoff]; */
v_vx = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_vy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_vz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
/* deposit charge for one particle at a time */
for (i = 0; i < 16; i++) {
ii = i >> 2;
if (i==(ii<<2)) {
switch (ii)
{
case 0:
/* replicate velocities of first group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,0);
q = _mm512_permute4f128_ps(v_vy,0);
r = _mm512_permute4f128_ps(v_vz,0);
/* regroup weights for first group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
break;
case 1:
/* replicate velocities of second group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,85);
q = _mm512_permute4f128_ps(v_vy,85);
r = _mm512_permute4f128_ps(v_vz,85);
/* regroup weights for second group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
break;
case 2:
/* replicate velocities of third group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,170);
q = _mm512_permute4f128_ps(v_vy,170);
r = _mm512_permute4f128_ps(v_vz,170);
/* regroup weights for third group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
v = _mm512_permute4f128_ps(v,78);
break;
case 3:
/* replicate velocities of fourth group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,255);
q = _mm512_permute4f128_ps(v_vy,255);
r = _mm512_permute4f128_ps(v_vz,255);
/* regroup weights for fourth group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
v = _mm512_permute4f128_ps(v,78);
break;
}
}
v_it = _mm512_setzero_epi32();
switch (i-(ii<<2))
{
/* first particle */
case 0:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,0);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,0);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,0);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,0);
break;
/* second particle */
case 1:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,24);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,85);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,85);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,85);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,85);
break;
/* third particle */
case 2:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r,
_mm512_int2mask(51),(__m512i)v_at,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,170);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,170);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,170);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,170);
break;
/* fourth particle */
case 3:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,177);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,255);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,255);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,255);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,255);
break;
}
_mm512_store_epi32(kk,v_nn);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*amz; */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
_mm512_store_epi32(kk,v_ll);
/* nn += 4*mxyv; */
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*dzp; */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*dzp; */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
}
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* ppart[j+5*nppmx+npoff] = -vz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit current within tile to local accumulator */
dx = amx*amz;
dy = amy*amz;
vx = ppart[j+3*nppmx+npoff];
vy = ppart[j+4*nppmx+npoff];
vz = ppart[j+5*nppmx+npoff];
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*amz;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*amz;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
dx = amx*dzp;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
dy = amy*dzp;
nn += 4*mxyv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*dzp;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*dzp;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -vy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
ppart[j+5*nppmx+npoff] = -vz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -vy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
}
/* deposit current to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(i+mxv*j+mxyv*k)];
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*j+mxyv*k)];
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*j+mxyv*k)];
}
}
}
/* deposit current to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[1+4*(i+mxv*j)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[2+4*(i+mxv*j)];
if (lm > mz) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*j+mxyv*(lm-1))];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[1+4*(i+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[2+4*(i+mxyv*k)];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(mxv*j+mxyv*k)];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(nm-1+mxv*j+mxyv*k)];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxyv*(lm-1))];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(mxv*j+mxyv*(lm-1))];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgrjppost3lt(float ppart[], float cu[], int kpic[], float qm,
float dt, float ci, int nppmx, int idimp, int nx,
int ny, int nz, int mx, int my, int mz, int nxv,
int nyv, int nzv, int mx1, int my1, int mxyz1,
int ipbc) {
/* for 3d code, this subroutine calculates particle current density
using first-order linear interpolation for relativistic particles
in addition, particle positions are advanced a half time-step
OpenMP/vector version using guard cells
data deposited in tiles
particles are stored in a segmented array
79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = momentum vx of particle n in tile m
ppart[m][4][n] = momentum vy of particle n in tile m
ppart[m][5][n] = momentum vz of particle n in tile m
cu[l][k][j][i] = ith component of current density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
nzv = fourth dimension of current array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
requires KNC, part needs to be 64 byte aligned
nppmx needs to be a multiple of 16
cu needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
float ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
float p2, gami;
float x, y, z, ux, uy, uz;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it, v_perm;
__m512 v_qm, v_ci2, v_dt, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_ux, v_uy, v_uz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv;
__m512 cp, cr;
__mmask16 msk;
__attribute__((aligned(64))) unsigned int kk[16];
float scu[4*MXV*MYV*MZV];
/* float scu[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
ci2 = ci*ci;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qm = _mm512_set1_ps(qm);
v_ci2 = _mm512_set1_ps(ci2);
v_dt = _mm512_set1_ps(dt);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y,z, \
vx,vy,vz,ux,uy,uz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,p2,gami,v_noff, \
v_moff,v_loff,v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx, \
v_amy,v_amz,v_dx1,v_vx,v_vy,v_vz,v_ux,v_uy,v_uz,v_gami,v_at,a,b,c,d,e, \
f,g,h,p,q,r,s,t,u,v,ws,wt,wu,wv,cp,cr,msk,kk,scu)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/* scu[j] = 0.0f; */
/* } */
memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* find inverse gamma */
/* ux = ppart[j+3*nppmx+npoff]; */
/* uy = ppart[j+4*nppmx+npoff]; */
/* uz = ppart[j+5*nppmx+npoff]; */
v_ux = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_uy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_uz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
/* p2 = ux*ux + uy*uy + uz*uz; */
v_at = _mm512_fmadd_ps(v_uy,v_uy,_mm512_mul_ps(v_ux,v_ux));
v_at = _mm512_fmadd_ps(v_uz,v_uz,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* calculate weights */
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
/* deposit current */
/* vx = ux*gami; */
/* vy = uy*gami; */
/* vz = uz*gami; */
v_vx = _mm512_mul_ps(v_ux,v_gami);
v_vy = _mm512_mul_ps(v_uy,v_gami);
v_vz = _mm512_mul_ps(v_uz,v_gami);
v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
/* deposit charge for one particle at a time */
for (i = 0; i < 16; i++) {
ii = i >> 2;
if (i==(ii<<2)) {
switch (ii)
{
case 0:
/* replicate velocities of first group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,0);
q = _mm512_permute4f128_ps(v_vy,0);
r = _mm512_permute4f128_ps(v_vz,0);
/* regroup weights for first group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
break;
case 1:
/* replicate velocities of second group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,85);
q = _mm512_permute4f128_ps(v_vy,85);
r = _mm512_permute4f128_ps(v_vz,85);
/* regroup weights for second group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
break;
case 2:
/* replicate velocities of third group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,170);
q = _mm512_permute4f128_ps(v_vy,170);
r = _mm512_permute4f128_ps(v_vz,170);
/* regroup weights for third group of 4 particles */
s = _mm512_mask_permute4f128_ps(a,
_mm512_int2mask(61680),b,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(c,
_mm512_int2mask(61680),d,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(e,
_mm512_int2mask(61680),f,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(g,
_mm512_int2mask(61680),h,177);
v = _mm512_permute4f128_ps(v,78);
break;
case 3:
/* replicate velocities of fourth group of 4 particles */
p = _mm512_permute4f128_ps(v_vx,255);
q = _mm512_permute4f128_ps(v_vy,255);
r = _mm512_permute4f128_ps(v_vz,255);
/* regroup weights for fourth group of 4 particles */
s = _mm512_mask_permute4f128_ps(b,
_mm512_int2mask(3855),a,177);
s = _mm512_permute4f128_ps(s,78);
t = _mm512_mask_permute4f128_ps(d,
_mm512_int2mask(3855),c,177);
t = _mm512_permute4f128_ps(t,78);
u = _mm512_mask_permute4f128_ps(f,
_mm512_int2mask(3855),e,177);
u = _mm512_permute4f128_ps(u,78);
v = _mm512_mask_permute4f128_ps(h,
_mm512_int2mask(3855),g,177);
v = _mm512_permute4f128_ps(v,78);
break;
}
}
v_it = _mm512_setzero_epi32();
switch (i-(ii<<2))
{
/* first particle */
case 0:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,0);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,0);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,0);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,0);
break;
/* second particle */
case 1:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,24);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,85);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,85);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,85);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,85);
break;
/* third particle */
case 2:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
_mm512_int2mask(170),(__m512i)q,177);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r,
_mm512_int2mask(51),(__m512i)v_at,78);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,170);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,170);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,170);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,170);
break;
/* fourth particle */
case 3:
/* reorder velocity components */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
_mm512_int2mask(85),(__m512i)p,177);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
_mm512_int2mask(68),(__m512i)r,177);
/* reorder weights */
ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)s,255);
wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)t,255);
wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)u,255);
wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
_mm512_int2mask(119),(__m512i)v,255);
break;
}
_mm512_store_epi32(kk,v_nn);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*amz; */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
_mm512_store_epi32(kk,v_ll);
/* nn += 4*mxyv; */
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*dzp; */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* scu[nn+4] += vx*dy; */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
mm = kk[i];
cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
&scu[mm]);
cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
&scu[mm+16]);
cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*dzp; */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx; */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy; */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
mm = kk[i] + 4*mxv;
cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
&scu[mm]);
cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
&scu[mm+16]);
cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr);
_mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
_mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
}
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -ux; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -uy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* ppart[j+5*nppmx+npoff] = -uz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_uz = _mm512_mask_sub_ps(v_uz,msk,v_zero,v_uz);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_uz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* ppart[j+3*nppmx+npoff] = -ux; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* ppart[j+4*nppmx+npoff] = -uy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy);
/* write output if test result is true for any particle */
if (msk)
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
/* find inverse gamma */
ux = ppart[j+3*nppmx+npoff];
uy = ppart[j+4*nppmx+npoff];
uz = ppart[j+5*nppmx+npoff];
p2 = ux*ux + uy*uy + uz*uz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = qm - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* deposit current within tile to local accumulator */
dx = amx*amz;
dy = amy*amz;
vx = ux*gami;
vy = uy*gami;
vz = uz*gami;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*amz;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*amz;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
dx = amx*dzp;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
dy = amy*dzp;
nn += 4*mxyv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = dyp*dzp;
scu[nn+4] += vx*dy;
scu[nn+1+4] += vy*dy;
scu[nn+2+4] += vz*dy;
dy = dx1*dzp;
mm = nn + 4*mxv;
scu[mm] += vx*dx;
scu[mm+1] += vy*dx;
scu[mm+2] += vz*dx;
scu[mm+4] += vx*dy;
scu[mm+1+4] += vy*dy;
scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -ux;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -uy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
ppart[j+5*nppmx+npoff] = -uz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+3*nppmx+npoff] = -ux;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+4*nppmx+npoff] = -uy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
}
/* deposit current to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(i+mxv*j+mxyv*k)];
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*j+mxyv*k)];
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*j+mxyv*k)];
}
}
}
/* deposit current to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[1+4*(i+mxv*j)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
+= scu[2+4*(i+mxv*j)];
if (lm > mz) {
#pragma omp atomic
cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*j+mxyv*(lm-1))];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[1+4*(i+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
+= scu[2+4*(i+mxyv*k)];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(mxv*j+mxyv*k)];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
+= scu[2+4*(nm-1+mxv*j+mxyv*k)];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxyv*(lm-1))];
if (mm > my) {
#pragma omp atomic
cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
+= scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(mxv*j+mxyv*(lm-1))];
if (nm > mx) {
#pragma omp atomic
cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
+= scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncpporder3lt(float ppart[], float ppbuff[], int kpic[],
                    int ncl[], int ihole[], int idimp, int nppmx,
                    int nx, int ny, int nz, int mx, int my, int mz,
                    int mx1, int my1, int mz1, int npbmx, int ntmax,
                    int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   algorithm has 3 steps. first, one finds particles leaving tile and
   stores their number in each direction, location, and destination in
   ncl and ihole. second, a prefix scan of ncl is performed and departing
   particles are buffered in ppbuff in direction order. finally, we copy
   the incoming particles from other tiles into ppart.
   input: all except ppbuff, ncl, ihole, irc
   output: ppart, ppbuff, kpic, ncl, ihole, irc
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppbuff[m][i][n] = i co-ordinate of particle n in tile m
   kpic[m] = number of particles in tile m
   ncl[m][i] = number of particles going to destination i, tile m
   ihole[m][:][0] = location of hole in array left by departing particle
   ihole[m][:][1] = direction destination of particle leaving hole
   all for tile m
   ihole[m][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 6
   nppmx = maximum number of particles in tile
   nx/ny/nz = system length in x/y/z direction
   mx/my/mz = number of grids in sorting cell in x/y/z
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mz1 = (system length in z direction - 1)/mz + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   requires KNC, ppart, ppbuff need to be 64 byte aligned
   nppmx, npbmx need to be a multiple of 16
local data */
   int mxy1, mxyz1, noff, moff, loff, npoff, npp, nps, nboff, ncoff;
   int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
   int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
   float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
   float dx, dy, dz;
   int ks[26];
   __m512i v_ist, v_it, v_0, v_1, v_3, v_9;
   __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
   __m512 v_anx, v_any, v_anz;
   __m512 v_dx, v_dy, v_dz, v_x;
   __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
   __m512 v_zero;
   __mmask16 msk1, msk2;
/* ls/lm are 64-byte aligned scratch for vector store/load round trips */
   __attribute__((aligned(64))) unsigned int ls[32], lm[32];
   mxy1 = mx1*my1;
   mxyz1 = mxy1*mz1;
   anx = (float) nx;
   any = (float) ny;
   anz = (float) nz;
/* direction-code increments: 1 or 2 for x, 3 or 6 for y, 9 or 18 for z */
/* (see the scalar remainder loop below for the encoding)               */
   v_0 = _mm512_set1_epi32(0);
   v_1 = _mm512_set1_epi32(1);
   v_3 = _mm512_set1_epi32(3);
   v_9 = _mm512_set1_epi32(9);
   v_anx = _mm512_set1_ps(anx);
   v_any = _mm512_set1_ps(any);
   v_anz = _mm512_set1_ps(anz);
   v_zero = _mm512_setzero_ps();
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ist,dx, \
dy,dz,edgelx,edgely,edgelz,edgerx,edgery,edgerz,v_it,v_ist,v_edgelx, \
v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_dx,v_dy,v_dz,v_x,msk1, \
msk2,ls)
   for (l = 0; l < mxyz1; l++) {
/* decode tile index l into grid offsets (noff,moff,loff) */
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
/* clip tile dimensions at the global grid boundary */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      ll = nz - loff;
      ll = mz < ll ? mz : ll;
      ih = 0;
      nh = 0;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      edgelz = loff;
      edgerz = loff + ll;
/* noff is reused from here on as the ihole offset for tile l */
      noff = (ntmax+1)*l;
      v_edgelx = _mm512_set1_ps(edgelx);
      v_edgely = _mm512_set1_ps(edgely);
      v_edgelz = _mm512_set1_ps(edgelz);
      v_edgerx = _mm512_set1_ps(edgerx);
      v_edgery = _mm512_set1_ps(edgery);
      v_edgerz = _mm512_set1_ps(edgerz);
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/*    ncl[j+26*l] = 0;        */
/* }                          */
      memset((void*)&ncl[26*l],0,26*sizeof(int));
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/* dx = ppart[j+npoff];         */
/* dy = ppart[j+nppmx+npoff];   */
/* dz = ppart[j+2*nppmx+npoff]; */
         v_dx = _mm512_load_ps(&ppart[j+npoff]);
         v_dy = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_dz = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* find particles going out of bounds */
/* ist = 0; */
         v_ist = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going                             */
/* if (dx >= edgerx) {              */
/*    if (dx >= anx)                */
/*       ppart[j+npoff] = dx - anx; */
/*    ist = 2;                      */
/* }                                */
         msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dx;
/* write output if test result is true for any particle */
            if (ii != 0) {
/* v_it = 2, the "leaving right in x" direction code */
               v_it = _mm512_add_epi32(v_1,v_1);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+npoff],v_x);
            }
/* if (dx < edgelx) {         */
/*    if (dx < 0.0) {         */
/*       dx += anx;           */
/*       if (dx < anx)        */
/*          ist = 1;          */
/*       else                 */
/*          dx = 0.0;         */
/*       ppart[j+npoff] = dx; */
/*    }                       */
/*    else {                  */
/*       ist = 1;             */
/*    }                       */
/* }                          */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
               msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
/* roundoff guard: dx + anx may still land exactly on anx */
               msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+npoff],v_x);
            }
         }
/* if (dy >= edgery) {                    */
/*    if (dy >= any)                      */
/*       ppart[j+nppmx+npoff] = dy - any; */
/*    ist += 6;                           */
/* }                                      */
         msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dy;
/* write output if test result is true for any particle */
            if (ii != 0) {
/* v_it = 6, the "leaving right in y" direction code */
               v_it = _mm512_add_epi32(v_3,v_3);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
            }
/* if (dy < edgely) {               */
/*    if (dy < 0.0) {               */
/*       dy += any;                 */
/*       if (dy < any)              */
/*          ist += 3;               */
/*       else                       */
/*          dy = 0.0;               */
/*       ppart[j+nppmx+npoff] = dy; */
/*    }                             */
/*    else {                        */
/*       ist += 3;                  */
/*    }                             */
/* }                                */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
               msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
/* roundoff guard: dy + any may still land exactly on any */
               msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
            }
         }
/* if (dz >= edgerz) {                      */
/*    if (dz >= anz)                        */
/*       ppart[j+2*nppmx+npoff] = dz - anz; */
/*    ist += 18;                            */
/* }                                        */
         msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dz;
/* write output if test result is true for any particle */
            if (ii != 0) {
/* v_it = 18, the "leaving front in z" direction code */
               v_it = _mm512_add_epi32(v_9,v_9);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
            }
/* if (dz < edgelz) {                 */
/*    if (dz < 0.0) {                 */
/*       dz += anz;                   */
/*       if (dz < anz)                */
/*          ist += 9;                 */
/*       else                         */
/*          dz = 0.0;                 */
/*       ppart[j+2*nppmx+npoff] = dz; */
/*    }                               */
/*    else {                          */
/*       ist += 9;                    */
/*    }                               */
/* }                                  */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
               msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
/* roundoff guard: dz + anz may still land exactly on anz */
               msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
            }
         }
/* increment counters */
/* if (ist > 0) {                               */
/*    ncl[ist+26*l-1] += 1;                     */
/*    ih += 1;                                  */
/*    if (ih <= ntmax) {                        */
/*       ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/*       ihole[1+2*(ih+(ntmax+1)*l)] = ist;     */
/*    }                                         */
/*    else {                                    */
/*       nh = 1;                                */
/*    }                                         */
/* }                                            */
/* counter updates are inherently scalar: spill codes and loop */
         _mm512_store_epi32(ls,v_ist);
         for (i = 0; i < 16; i++) {
            ist = ls[i];
            if (ist > 0) {
               ncl[ist+26*l-1] += 1;
               ih += 1;
               if (ih <= ntmax) {
                  ihole[2*(ih+noff)] = j + i + 1;
                  ihole[1+2*(ih+noff)] = ist;
               }
               else {
                  nh = 1;
               }
            }
         }
      }
/* loop over remaining particles in tile */
      for (j = nps; j < npp; j++) {
         dx = ppart[j+npoff];
         dy = ppart[j+nppmx+npoff];
         dz = ppart[j+2*nppmx+npoff];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going                             */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[j+npoff] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[j+npoff] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[j+nppmx+npoff] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[j+nppmx+npoff] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (dz >= edgerz) {
            if (dz >= anz)
               ppart[j+2*nppmx+npoff] = dz - anz;
            ist += 18;
         }
         else if (dz < edgelz) {
            if (dz < 0.0) {
               dz += anz;
               if (dz < anz)
                  ist += 9;
               else
                  dz = 0.0;
               ppart[j+2*nppmx+npoff] = dz;
            }
            else {
               ist += 9;
            }
         }
         if (ist > 0) {
            ncl[ist+26*l-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+noff)] = j + 1;
               ihole[1+2*(ih+noff)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
/* hole count for tile l is stored (negated on overflow) in ihole[0][0] */
      ihole[2*noff] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
/* msk1 selects the low 10 lanes: 26 counters = 16 + 10 */
   msk1 = _mm512_int2mask(1023);
/* v_m1/v_m2 are permute tables for the stride-4 and stride-8 scan steps */
   v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
   v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
   for (l = 0; l < mxyz1; l++) {
      npoff = idimp*nppmx*l;
      nboff = idimp*npbmx*l;
      noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/* isum = 0;                  */
/* for (j = 0; j < 26; j++) { */
/*    ist = ncl[j+26*l];      */
/*    ncl[j+26*l] = isum;     */
/*    isum += ist;            */
/* }                          */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
      mm = 26*l;
      v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
      v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
      _mm512_store_epi32(ls,v_it);
      v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
      v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
      _mm512_store_epi32(&ls[16],v_is);
      v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
/* four log2-stride passes (1,2,4,8) build an inclusive scan per block; */
/* the inclusive result is made exclusive by the subtract below         */
      for (j = 0; j < 32; j+=16) {
/* load data */
         v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
         v_is = _mm512_shuffle_epi32(v_it0,177);
         v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
                v_it0,v_is);
/* second pass */
         v_is = _mm512_shuffle_epi32(v_it,80);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
                v_is);
/* third pass */
         v_is = _mm512_permutevar_epi32(v_m1,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
                v_is);
/* fourth pass */
         v_is = _mm512_permutevar_epi32(v_m2,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
                v_is);
/* add offset */
         v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset: broadcast the running total (last lane of first block) */
         if (j==0) {
            v_ioff = _mm512_shuffle_epi32(v_it,255);
            v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
         }
/* subtract for exclusive scan */
         v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
         _mm512_store_epi32(&ls[j],v_it);
      }
      nh = ihole[2*noff];
      nps = 16*(nh/16);
/* nps = (nh >> 4) << 4; */
      ip = 0;
/* loop over particles leaving tile in groups of 16 */
      for (j = 0; j < nps; j+=16) {
/* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */
/* ist = ihole[1+2*(j+1+(ntmax+1)*l)];  */
/* bulk-load 16 (location,direction) pairs = 32 ints into lm */
         mm = 2*(j+1+noff);
         v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
         _mm512_store_epi32(lm,v_it);
         mm += 16;
         v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
         _mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
         for (ll = 0; ll < 16; ll++) {
            j1 = lm[2*ll] - 1;
            ist = lm[1+2*ll];
/* ls holds the running write offset per direction (from the scan) */
            ii = ls[ist-1];
            if (ii < npbmx) {
               for (i = 0; i < idimp; i++) {
                  ppbuff[ii+npbmx*i+nboff]
                  = ppart[j1+nppmx*i+npoff];
               }
            }
            else {
               ip = 1;
            }
            ls[ist-1] = ii + 1;
         }
      }
/* loop over remaining particles leaving tile */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+noff)] - 1;
         ist = ihole[1+2*(j+1+noff)];
         ii = ls[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         ls[ist-1] = ii + 1;
      }
/* store 26 data elements into ncl */
      mm = 26*l;
      v_it = _mm512_load_epi32(ls);
      v_is = _mm512_load_epi32(&ls[16]);
      _mm512_packstorelo_epi32(&ncl[mm],v_it);
      _mm512_packstorehi_epi32(&ncl[mm+16],v_it);
      _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
      _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
      if (ip > 0)
         *irc = ncl[25+26*l];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
/* v_ioff = lane indices 0..15, used to vectorize per-lane hole counts */
   v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
   v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
   for (l = 0; l < mxyz1; l++) {
      npp = kpic[l];
      npoff = idimp*nppmx*l;
      noff = (ntmax+1)*l;
      v_m2 = _mm512_set1_epi32(noff+1);
      v_m3 = _mm512_set1_epi32(npoff);
      kz = l/mxy1;
      k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
      lk = kz*mxy1;
/* find tile behind */
      ll = kz - 1;
      if (ll < 0)
         ll += mz1;
      ll = ll*mxy1;
/* find tile in front */
      lr = kz + 1;
      if (lr >= mz1)
         lr -= mz1;
      lr = lr*mxy1;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1 ;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
/* ks[ii] = neighbor tile whose direction-ii buffer feeds this tile */
      ks[0] = kxr + kk + lk;
      ks[1] = kxl + kk + lk;
      ks[2] = kx + kr + lk;
      ks[3] = kxr + kr + lk;
      ks[4] = kxl + kr + lk;
      ks[5] = kx + kl + lk;
      ks[6] = kxr + kl + lk;
      ks[7] = kxl + kl + lk;
      ks[8] = kx + kk + lr;
      ks[9] = kxr + kk + lr;
      ks[10] = kxl + kk + lr;
      ks[11] = kx + kr + lr;
      ks[12] = kxr + kr + lr;
      ks[13] = kxl + kr + lr;
      ks[14] = kx + kl + lr;
      ks[15] = kxr + kl + lr;
      ks[16] = kxl + kl + lr;
      ks[17] = kx + kk + ll;
      ks[18] = kxr + kk + ll;
      ks[19] = kxl + kk + ll;
      ks[20] = kx + kr + ll;
      ks[21] = kxr + kr + ll;
      ks[22] = kxl + kr + ll;
      ks[23] = kx + kl + ll;
      ks[24] = kxr + kl + ll;
      ks[25] = kxl + kl + ll;
/* loop over directions */
      nh = ihole[2*noff];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      v_it0 = _mm512_set1_epi32(nh);
/* v_is = noff + 1 + nh, added back below to rebuild ihole indices */
      v_is = _mm512_add_epi32(v_m2,v_it0);
/* v_it0 lane L = L - nh, so ih + v_it0 < 0 iff ih + L < nh */
      v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
      v_npp = _mm512_set1_epi32(npp);
      for (ii = 0; ii < 26; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+26*ks[ii]] - ncoff;
/* nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/* ih += 1;                        */
/* if (ih <= nh) {                 */
/*    j1 = ihole[2*(ih+noff)] - 1; */
/* }                               */
/* place overflow at end of array */
/* else {                          */
/*    j1 = npp;                    */
/*    npp += 1;                    */
/* }                               */
/* msk1 = lanes with ih+lane < nh: holes remain, gather from ihole; */
/* the other lanes append at indices npp, npp+1, ...                */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
            msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
/* double the masked lanes: ihole stores (location,direction) pairs */
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
            v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
                   v_npp);
            v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
                   (int *)ihole,4);
            v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
            ih += 16;
/* grow npp by the number of lanes that overflowed past the holes */
            nn = ih - nh;
            if (nn > 0) {
               nn = nn < 16 ? nn : 16;
               npp += nn;
            }
            msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
            ll = _mm512_mask2int(_mm512_knot(msk1));
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/* if (j1 < nppmx)                     */
/*    ppart[j1+nppmx*i+npoff]          */
/*    = ppbuff[j+ncoff+npbmx*i+nboff]; */
               mm = j + ncoff + npbmx*i + nboff;
               v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
               v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
               if (ll==0) {
                  _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
               }
               else {
/* some destination exceeded nppmx: scatter only the valid lanes */
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
                  v_x,4);
               }
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            if (ll != 0) {
               ist = 1;
            }
         }
/* loop over remaining particles in this direction */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled    */
      if (ih < nh) {
         ip = nh - ih;
         ii = nh;
         nn = ihole[2*(ii+noff)] - 1;
/* NOTE(review): this v_it0 value appears to be overwritten below
   (inside the j loop) before it is ever read - confirm intentional */
         v_it0 = _mm512_set1_epi32(nn);
         ih += 1;
         j2 = ihole[2*(ih+noff)] - 1;
         v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order      */
/* nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
/* gather the next 16 hole destinations from ihole */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
            v_mm = _mm512_add_epi32(v_mm,v_m2);
            v_mm = _mm512_add_epi32(v_mm,v_mm);
            v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
            v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1;                      */
/* if (j1==nn) {                          */
/*    ii -= 1;                            */
/*    nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */
/* }                                      */
/* compact source indices into ls, skipping sources that are holes */
            kk = 0;
            for (ll = 0; ll < 16; ll++) {
               j1 = npp - j - ll - 1;
               if (j1==nn) {
                  ii -= 1;
                  nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
               }
               else {
                  ls[kk] = j1;
                  kk += 1;
               }
            }
            v_it = _mm512_load_epi32(ls);
            v_it0 = _mm512_set1_epi32(kk);
/* only the first kk lanes carry valid source/destination pairs */
            msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
            v_is = _mm512_add_epi32(v_is,v_m3);
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff]       */
/*    = ppart[j1+nppmx*i+npoff]; */
               if (kk==16) {
                  v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
                  _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
               }
               else {
                  v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
                        (float *)ppart,4);
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
                  v_x,4);
               }
               v_is = _mm512_add_epi32(v_is,v_m1);
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            ih += kk;
/* holes with locations greater than npp-ip do not need to be filled */
         }
/* loop over remaining particles */
         if (nps < ip) {
            nn = ihole[2*(ii+noff)] - 1;
            j2 = ihole[2*(ih+noff)] - 1;
         }
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+noff)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[l] = npp;
   }
   return;
}
/*--------------------------------------------------------------------*/
void ckncpporderf3lt(float ppart[], float ppbuff[], int kpic[],
                     int ncl[], int ihole[], int idimp, int nppmx,
                     int mx1, int my1, int mz1, int npbmx, int ntmax,
                     int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   the algorithm has 2 steps. first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   ckncgppushf3lt subroutine.
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppbuff[m][i][n] = i co-ordinate of particle n in tile m
   kpic[m] = number of particles in tile m
   ncl[m][i] = number of particles going to destination i, tile m
   ihole[m][:][0] = location of hole in array left by departing particle
   ihole[m][:][1] = direction destination of particle leaving hole
   all for tile m
   ihole[m][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 6
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mz1 = (system length in z direction - 1)/mz + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   requires KNC, ppart, ppbuff need to be 64 byte aligned
   nppmx, npbmx need to be a multiple of 16
local data */
   int mxy1, mxyz1, noff, npp, npoff, nps, nboff, ncoff;
   int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
   int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
   int ks[26];
   __m512i v_it, v_0, v_1;
   __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
   __m512 v_x, v_zero;
   __mmask16 msk1;
   __attribute__((aligned(64))) unsigned int ls[32], lm[32];
   mxy1 = mx1*my1;
   mxyz1 = mxy1*mz1;
   v_0 = _mm512_set1_epi32(0);
   v_1 = _mm512_set1_epi32(1);
   v_zero = _mm512_setzero_ps();
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
/* msk1 selects the low 10 lanes: the 26 ncl entries span 16 + 10 lanes */
   msk1 = _mm512_int2mask(1023);
/* v_m1, v_m2 = lane source indices for passes 3 and 4 of the scan below */
   v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
   v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
   for (l = 0; l < mxyz1; l++) {
      npoff = idimp*nppmx*l;
      nboff = idimp*npbmx*l;
      noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/* isum = 0; */
/* for (j = 0; j < 26; j++) { */
/*    ist = ncl[j+26*l]; */
/*    ncl[j+26*l] = isum; */
/*    isum += ist; */
/* } */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
      mm = 26*l;
      v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
      v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
      _mm512_store_epi32(ls,v_it);
      v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
      v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
      _mm512_store_epi32(&ls[16],v_is);
      v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
/* each pass doubles the partial-sum span (1,2,4,8), log2 scan in-lane */
      for (j = 0; j < 32; j+=16) {
/* load data */
         v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
         v_is = _mm512_shuffle_epi32(v_it0,177);
         v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
                v_it0,v_is);
/* second pass */
         v_is = _mm512_shuffle_epi32(v_it,80);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
                v_is);
/* third pass */
         v_is = _mm512_permutevar_epi32(v_m1,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
                v_is);
/* fourth pass */
         v_is = _mm512_permutevar_epi32(v_m2,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
                v_is);
/* add offset */
         v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset */
/* broadcast lane 15 (total of first 16 elements) to all lanes */
         if (j==0) {
            v_ioff = _mm512_shuffle_epi32(v_it,255);
            v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
         }
/* subtract for exclusive scan */
         v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
         _mm512_store_epi32(&ls[j],v_it);
      }
/* nh = number of particles leaving this tile */
      nh = ihole[2*noff];
      nps = 16*(nh/16);
/*    nps = (nh >> 4) << 4; */
/* ip flags ppbuff overflow */
      ip = 0;
/* loop over particles leaving tile in groups of 16 */
      for (j = 0; j < nps; j+=16) {
/*       j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */
/*       ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */
         mm = 2*(j+1+noff);
         v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
         _mm512_store_epi32(lm,v_it);
         mm += 16;
         v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
         _mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
/* lm holds 16 (location,destination) pairs copied from ihole */
         for (ll = 0; ll < 16; ll++) {
            j1 = lm[2*ll] - 1;
            ist = lm[1+2*ll];
/* ii = current write offset into ppbuff for direction ist */
            ii = ls[ist-1];
            if (ii < npbmx) {
               for (i = 0; i < idimp; i++) {
                  ppbuff[ii+npbmx*i+nboff]
                  = ppart[j1+nppmx*i+npoff];
               }
            }
            else {
               ip = 1;
            }
            ls[ist-1] = ii + 1;
         }
      }
/* loop over remaining particles leaving tile */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+noff)] - 1;
         ist = ihole[1+2*(j+1+noff)];
         ii = ls[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         ls[ist-1] = ii + 1;
      }
/* store 26 data elements into ncl */
/* ncl now holds the inclusive scan = final particle counts per direction */
      mm = 26*l;
      v_it = _mm512_load_epi32(ls);
      v_is = _mm512_load_epi32(&ls[16]);
      _mm512_packstorelo_epi32(&ncl[mm],v_it);
      _mm512_packstorehi_epi32(&ncl[mm+16],v_it);
      _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
      _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
      if (ip > 0)
         *irc = ncl[25+26*l];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
   v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
   v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
   for (l = 0; l < mxyz1; l++) {
      npp = kpic[l];
      npoff = idimp*nppmx*l;
      noff = (ntmax+1)*l;
      v_m2 = _mm512_set1_epi32(noff+1);
      v_m3 = _mm512_set1_epi32(npoff);
      kz = l/mxy1;
      k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
      lk = kz*mxy1;
/* find tile behind */
      ll = kz - 1;
      if (ll < 0)
         ll += mz1;
      ll = ll*mxy1;
/* find tile in front */
      lr = kz + 1;
      if (lr >= mz1)
         lr -= mz1;
      lr = lr*mxy1;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1 ;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk + lk;
      ks[1] = kxl + kk + lk;
      ks[2] = kx + kr + lk;
      ks[3] = kxr + kr + lk;
      ks[4] = kxl + kr + lk;
      ks[5] = kx + kl + lk;
      ks[6] = kxr + kl + lk;
      ks[7] = kxl + kl + lk;
      ks[8] = kx + kk + lr;
      ks[9] = kxr + kk + lr;
      ks[10] = kxl + kk + lr;
      ks[11] = kx + kr + lr;
      ks[12] = kxr + kr + lr;
      ks[13] = kxl + kr + lr;
      ks[14] = kx + kl + lr;
      ks[15] = kxr + kl + lr;
      ks[16] = kxl + kl + lr;
      ks[17] = kx + kk + ll;
      ks[18] = kxr + kk + ll;
      ks[19] = kxl + kk + ll;
      ks[20] = kx + kr + ll;
      ks[21] = kxr + kr + ll;
      ks[22] = kxl + kr + ll;
      ks[23] = kx + kl + ll;
      ks[24] = kxr + kl + ll;
      ks[25] = kxl + kl + ll;
/* loop over directions */
/* nh = number of holes available in this tile */
      nh = ihole[2*noff];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
/* precompute: v_it0 = lane - nh, v_is = noff + 1 + nh, v_npp = npp */
      v_it0 = _mm512_set1_epi32(nh);
      v_is = _mm512_add_epi32(v_m2,v_it0);
      v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
      v_npp = _mm512_set1_epi32(npp);
      for (ii = 0; ii < 26; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+26*ks[ii]] - ncoff;
/*       nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/*          ih += 1; */
/*          if (ih <= nh) { */
/*             j1 = ihole[2*(ih+noff)] - 1; */
/*          } */
/* place overflow at end of array */
/*          else { */
/*             j1 = npp; */
/*             npp += 1; */
/*          } */
/* lanes with ih+lane < nh gather a hole location from ihole; */
/* the rest are placed past the end of the particle array */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
            msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
            v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
                   v_npp);
            v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
                   (int *)ihole,4);
            v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
            ih += 16;
/* advance npp by the number of lanes that spilled past the holes */
            nn = ih - nh;
            if (nn > 0) {
               nn = nn < 16 ? nn : 16;
               npp += nn;
            }
/* lanes with destination >= nppmx overflow ppart; ll != 0 records it */
            msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
            ll = _mm512_mask2int(_mm512_knot(msk1));
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/*             if (j1 < nppmx) */
/*                ppart[j1+nppmx*i+npoff] */
/*                = ppbuff[j+ncoff+npbmx*i+nboff]; */
/* loadunpack pair performs a possibly unaligned 16-element load */
               mm = j + ncoff + npbmx*i + nboff;
               v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
               v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
               if (ll==0) {
                  _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
               }
               else {
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
                  v_x,4);
               }
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            if (ll != 0) {
               ist = 1;
            }
         }
/* loop over remaining particles in this direction */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled */
      if (ih < nh) {
         ip = nh - ih;
         ii = nh;
         nn = ihole[2*(ii+noff)] - 1;
         v_it0 = _mm512_set1_epi32(nn);
         ih += 1;
         j2 = ihole[2*(ih+noff)] - 1;
/* v_m2 now holds noff so that 2*(ih+lane+noff) indexes ihole below */
         v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
/*       nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/*          j2 = ihole[2*(ih+noff)] - 1; */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
            v_mm = _mm512_add_epi32(v_mm,v_m2);
            v_mm = _mm512_add_epi32(v_mm,v_mm);
            v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
            v_is = _mm512_sub_epi32(v_is,v_1);
/*          j1 = npp - j - 1; */
/*          if (j1==nn) { */
/*             ii -= 1; */
/*             nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */
/*          } */
/* collect up to 16 source indices, skipping particles that sit in holes */
            kk = 0;
            for (ll = 0; ll < 16; ll++) {
               j1 = npp - j - ll - 1;
               if (j1==nn) {
                  ii -= 1;
                  nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
               }
               else {
                  ls[kk] = j1;
                  kk += 1;
               }
            }
/* msk1 enables the kk valid lanes collected above */
            v_it = _mm512_load_epi32(ls);
            v_it0 = _mm512_set1_epi32(kk);
            msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
            v_is = _mm512_add_epi32(v_is,v_m3);
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/*             ppart[j2+nppmx*i+npoff] */
/*             = ppart[j1+nppmx*i+npoff]; */
               if (kk==16) {
                  v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
                  _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
               }
               else {
                  v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
                        (float *)ppart,4);
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
                  v_x,4);
               }
               v_is = _mm512_add_epi32(v_is,v_m1);
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            ih += kk;
/* holes with locations great than npp-ip do not need to be filled */
         }
/* loop over remaining particles */
         if (nps < ip) {
            nn = ihole[2*(ii+noff)] - 1;
            j2 = ihole[2*(ih+noff)] - 1;
         }
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+noff)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[l] = npp;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cknccguard3l(float fxyz[], int nx, int ny, int nz, int nxe,
                  int nye, int nze) {
/* replicate extended periodic vector field fxyz
   linear interpolation
   nx/ny/nz = system length in x/y/z direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   nze is accepted for interface consistency but not referenced here
   requires KNC, fxyz needs to be 64 byte aligned
   nxe needs to be a multiple of 4
local data */
#define N 4
   int j, k, l, nxs, nxyen, ll;
   nxs = 4*(nx/4);
   nxyen = N*nxe*nye;
/* copy edges of extended field */
#pragma omp parallel
   {
#pragma omp for nowait \
      private(j,k,l,ll)
      for (l = 0; l < nz; l++) {
         ll = nxyen*l;
/* replicate the x edge: copy column 0 to column nx for each y row */
         for (k = 0; k < ny; k++) {
            fxyz[N*nx+N*nxe*k+ll] = fxyz[N*nxe*k+ll];
            fxyz[1+N*nx+N*nxe*k+ll] = fxyz[1+N*nxe*k+ll];
            fxyz[2+N*nx+N*nxe*k+ll] = fxyz[2+N*nxe*k+ll];
         }
/* vector loop over elements in blocks of 4 */
/* mask 30583 = 0x7777: write components 0-2 of each 4-float group, */
/* leaving the 4th (padding) component untouched                    */
         for (j = 0; j < nxs; j+=4) {
            _mm512_mask_store_ps(&fxyz[N*j+N*nxe*ny+ll],
            _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j+ll]));
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            fxyz[N*j+N*nxe*ny+ll] = fxyz[N*j+ll];
            fxyz[1+N*j+N*nxe*ny+ll] = fxyz[1+N*j+ll];
            fxyz[2+N*j+N*nxe*ny+ll] = fxyz[2+N*j+ll];
         }
         fxyz[N*nx+N*nxe*ny+ll] = fxyz[ll];
         fxyz[1+N*nx+N*nxe*ny+ll] = fxyz[1+ll];
         fxyz[2+N*nx+N*nxe*ny+ll] = fxyz[2+ll];
      }
/* replicate the z edge: copy plane 0 to plane nz */
#pragma omp for \
      private(j,k)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 */
         for (j = 0; j < nxs; j+=4) {
            _mm512_mask_store_ps(&fxyz[N*j+N*nxe*k+nxyen*nz],
            _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j+N*nxe*k]));
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            fxyz[N*j+N*nxe*k+nxyen*nz] = fxyz[N*j+N*nxe*k];
            fxyz[1+N*j+N*nxe*k+nxyen*nz] = fxyz[1+N*j+N*nxe*k];
            fxyz[2+N*j+N*nxe*k+nxyen*nz] = fxyz[2+N*j+N*nxe*k];
         }
         fxyz[N*nx+N*nxe*k+nxyen*nz] = fxyz[N*nxe*k];
         fxyz[1+N*nx+N*nxe*k+nxyen*nz] = fxyz[1+N*nxe*k];
         fxyz[2+N*nx+N*nxe*k+nxyen*nz] = fxyz[2+N*nxe*k];
      }
   }
/* replicate the y,z corner row (k = ny, l = nz) from row 0 of plane 0 */
/* vector loop over elements in blocks of 4 */
   for (j = 0; j < nxs; j+=4) {
      _mm512_mask_store_ps(&fxyz[N*j+N*nxe*ny+nxyen*nz],
      _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j]));
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      fxyz[N*j+N*nxe*ny+nxyen*nz] = fxyz[N*j];
      fxyz[1+N*j+N*nxe*ny+nxyen*nz] = fxyz[1+N*j];
      fxyz[2+N*j+N*nxe*ny+nxyen*nz] = fxyz[2+N*j];
   }
   fxyz[N*nx+N*nxe*ny+nxyen*nz] = fxyz[0];
   fxyz[1+N*nx+N*nxe*ny+nxyen*nz] = fxyz[1];
   fxyz[2+N*nx+N*nxe*ny+nxyen*nz] = fxyz[2];
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void ckncacguard3l(float cu[], int nx, int ny, int nz, int nxe, int nye,
                   int nze) {
/* accumulate extended periodic field cu
   linear interpolation
   nx/ny/nz = system length in x/y/z direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   nze is accepted for interface consistency but not referenced here
   requires KNC, cu needs to be 64 byte aligned
   nxe needs to be a multiple of 4
local data */
   int j, k, l, nxs, nxyen, ll;
   __m512 v_cu, v_zero;
   nxs = 4*(nx/4);
   nxyen = 4*nxe*nye;
   v_zero = _mm512_set1_ps(0.0f);
/* accumulate edges of extended field */
/* for each plane l: fold guard column nx into column 0 and */
/* guard row ny into row 0, then zero the guard cells        */
   for (l = 0; l < nz; l++) {
      ll = nxyen*l;
      for (k = 0; k < ny; k++) {
         cu[4*nxe*k+ll] += cu[4*nx+4*nxe*k+ll];
         cu[1+4*nxe*k+ll] += cu[1+4*nx+4*nxe*k+ll];
         cu[2+4*nxe*k+ll] += cu[2+4*nx+4*nxe*k+ll];
         cu[4*nx+4*nxe*k+ll] = 0.0;
         cu[1+4*nx+4*nxe*k+ll] = 0.0;
         cu[2+4*nx+4*nxe*k+ll] = 0.0;
      }
/* vector loop over elements in blocks of 4 */
/* 16 floats per iteration = 4 grid points x 4 components */
      for (j = 0; j < nxs; j+=4) {
         v_cu = _mm512_load_ps(&cu[4*j+4*nxe*ny+ll]);
         v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j+ll]),v_cu);
         _mm512_store_ps(&cu[4*j+ll],v_cu);
         _mm512_store_ps(&cu[4*j+4*nxe*ny+ll],v_zero);
      }
/* loop over remaining elements */
      for (j = nxs; j < nx; j++) {
         cu[4*j+ll] += cu[4*j+4*nxe*ny+ll];
         cu[1+4*j+ll] += cu[1+4*j+4*nxe*ny+ll];
         cu[2+4*j+ll] += cu[2+4*j+4*nxe*ny+ll];
         cu[4*j+4*nxe*ny+ll] = 0.0;
         cu[1+4*j+4*nxe*ny+ll] = 0.0;
         cu[2+4*j+4*nxe*ny+ll] = 0.0;
      }
      cu[ll] += cu[4*nx+4*nxe*ny+ll];
      cu[1+ll] += cu[1+4*nx+4*nxe*ny+ll];
      cu[2+ll] += cu[2+4*nx+4*nxe*ny+ll];
      cu[4*nx+4*nxe*ny+ll] = 0.0;
      cu[1+4*nx+4*nxe*ny+ll] = 0.0;
      cu[2+4*nx+4*nxe*ny+ll] = 0.0;
   }
/* fold guard plane nz into plane 0, then zero it */
   for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 */
      for (j = 0; j < nxs; j+=4) {
         v_cu = _mm512_load_ps(&cu[4*j+4*nxe*k+nxyen*nz]);
         v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j+4*nxe*k]),v_cu);
         _mm512_store_ps(&cu[4*j+4*nxe*k],v_cu);
         _mm512_store_ps(&cu[4*j+4*nxe*k+nxyen*nz],v_zero);
      }
/* loop over remaining elements */
      for (j = nxs; j < nx; j++) {
         cu[4*j+4*nxe*k] += cu[4*j+4*nxe*k+nxyen*nz];
         cu[1+4*j+4*nxe*k] += cu[1+4*j+4*nxe*k+nxyen*nz];
         cu[2+4*j+4*nxe*k] += cu[2+4*j+4*nxe*k+nxyen*nz];
         cu[4*j+4*nxe*k+nxyen*nz] = 0.0;
         cu[1+4*j+4*nxe*k+nxyen*nz] = 0.0;
         cu[2+4*j+4*nxe*k+nxyen*nz] = 0.0;
      }
      cu[4*nxe*k] += cu[4*nx+4*nxe*k+nxyen*nz];
      cu[1+4*nxe*k] += cu[1+4*nx+4*nxe*k+nxyen*nz];
      cu[2+4*nxe*k] += cu[2+4*nx+4*nxe*k+nxyen*nz];
      cu[4*nx+4*nxe*k+nxyen*nz] = 0.0;
      cu[1+4*nx+4*nxe*k+nxyen*nz] = 0.0;
      cu[2+4*nx+4*nxe*k+nxyen*nz] = 0.0;
   }
/* fold the remaining corner row (k = ny, l = nz) into row 0 of plane 0 */
/* vector loop over elements in blocks of 4 */
   for (j = 0; j < nxs; j+=4) {
      v_cu = _mm512_load_ps(&cu[4*j+4*nxe*ny+nxyen*nz]);
      v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j]),v_cu);
      _mm512_store_ps(&cu[4*j],v_cu);
      _mm512_store_ps(&cu[4*j+4*nxe*ny+nxyen*nz],v_zero);
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      cu[4*j] += cu[4*j+4*nxe*ny+nxyen*nz];
      cu[1+4*j] += cu[1+4*j+4*nxe*ny+nxyen*nz];
      cu[2+4*j] += cu[2+4*j+4*nxe*ny+nxyen*nz];
      cu[4*j+4*nxe*ny+nxyen*nz] = 0.0;
      cu[1+4*j+4*nxe*ny+nxyen*nz] = 0.0;
      cu[2+4*j+4*nxe*ny+nxyen*nz] = 0.0;
   }
   cu[0] += cu[4*nx+4*nxe*ny+nxyen*nz];
   cu[1] += cu[1+4*nx+4*nxe*ny+nxyen*nz];
   cu[2] += cu[2+4*nx+4*nxe*ny+nxyen*nz];
   cu[4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   cu[1+4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   cu[2+4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   return;
}
/*--------------------------------------------------------------------*/
void ckncaguard3l(float q[], int nx, int ny, int nz, int nxe, int nye,
                  int nze) {
/* accumulate extended periodic scalar field q
   linear interpolation
   nx/ny/nz = system length in x/y/z direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   nze is accepted for interface consistency but not referenced here
   requires KNC, q needs to be 64 byte aligned
   nxe needs to be a multiple of 16
local data */
   int j, k, l, nxs, nxye, ll;
   __m512 v_q;
   nxs = 16*(nx/16);
   nxye = nxe*nye;
/* accumulate edges of extended field */
#pragma omp parallel
   {
#pragma omp for \
      private(j,k,l,ll,v_q)
      for (l = 0; l < nz; l++) {
         ll = nxye*l;
/* fold guard column nx into column 0 of each row, then zero it */
         for (k = 0; k < ny; k++) {
            q[nxe*k+ll] += q[nx+nxe*k+ll];
            q[nx+nxe*k+ll] = 0.0;
         }
/* fold guard row ny into row 0 of this plane */
/* vector loop over elements in blocks of 16 */
         for (j = 0; j < nxs; j+=16) {
            v_q = _mm512_load_ps(&q[j+nxe*ny+ll]);
            v_q = _mm512_add_ps(_mm512_load_ps(&q[j+ll]),v_q);
            _mm512_store_ps(&q[j+ll],v_q);
            _mm512_store_ps(&q[j+nxe*ny+ll],_mm512_setzero_ps());
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            q[j+ll] += q[j+nxe*ny+ll];
            q[j+nxe*ny+ll] = 0.0;
         }
         q[ll] += q[nx+nxe*ny+ll];
         q[nx+nxe*ny+ll] = 0.0;
      }
/* fold guard plane nz into plane 0, then zero it */
#pragma omp for \
      private(j,k,v_q)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 16 */
         for (j = 0; j < nxs; j+=16) {
            v_q = _mm512_load_ps(&q[j+nxe*k+nxye*nz]);
            v_q = _mm512_add_ps(_mm512_load_ps(&q[j+nxe*k]),v_q);
            _mm512_store_ps(&q[j+nxe*k],v_q);
            _mm512_store_ps(&q[j+nxe*k+nxye*nz],_mm512_setzero_ps());
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            q[j+nxe*k] += q[j+nxe*k+nxye*nz];
            q[j+nxe*k+nxye*nz] = 0.0;
         }
         q[nxe*k] += q[nx+nxe*k+nxye*nz];
         q[nx+nxe*k+nxye*nz] = 0.0;
      }
   }
/* fold the remaining corner row (k = ny, l = nz) into row 0 of plane 0 */
/* vector loop over elements in blocks of 16 */
   for (j = 0; j < nxs; j+=16) {
      v_q = _mm512_load_ps(&q[j+nxe*ny+nxye*nz]);
      v_q = _mm512_add_ps(_mm512_load_ps(&q[j]),v_q);
      _mm512_store_ps(&q[j],v_q);
      _mm512_store_ps(&q[j+nxe*ny+nxye*nz],_mm512_setzero_ps());
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      q[j] += q[j+nxe*ny+nxye*nz];
      q[j+nxe*ny+nxye*nz] = 0.0;
   }
   q[0] += q[nx+nxe*ny+nxye*nz];
   q[nx+nxe*ny+nxye*nz] = 0.0;
   return;
}
/*--------------------------------------------------------------------*/
void ckncmpois33(float complex q[], float complex fxyz[], int isign,
float complex ffc[], float ax, float ay, float az,
float affp, float *we, int nx, int ny, int nz,
int nxvh, int nyv, int nzv, int nxhd, int nyhd,
int nzhd) {
/* this subroutine solves 3d poisson's equation in fourier space for
force/charge (or convolution of electric field over particle shape)
with periodic boundary conditions.
for isign = 0, output: ffc
input: isign,ax,ay,az,affp,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
for isign = -1, output: fxyz, we
input: q,ffc,isign,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
approximate flop count is:
59*nxc*nyc*nzc + 26*(nxc*nyc + nxc*nzc + nyc*nzc)
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
if isign = 0, form factor array is prepared
if isign is not equal to 0, force/charge is calculated
equation used is:
fx[kz][ky][kx] = -sqrt(-1)*kx*g[kz][ky][kx]*s[kz][ky][kx],
fy[kz][ky][kx] = -sqrt(-1)*ky*g[kz][ky][kx]*s[kz][ky][kx],
fz[kz][ky][kx] = -sqrt(-1)*kz*g[kz][ky][kx]*s[kz][ky][kx],
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
j,k,l = fourier mode numbers,
g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx],
s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for
fx(kx=pi) = fy(kx=pi) = fz(kx=pi) = 0,
fx(ky=pi) = fy(ky=pi) = fx(ky=pi) = 0,
fx(kz=pi) = fy(kz=pi) = fz(kz=pi) = 0,
fx(kx=0,ky=0,kz=0) = fy(kx=0,ky=0,kz=0) = fz(kx=0,ky=0,kz=0) = 0.
q[l][k][j] = complex charge density for fourier mode (j,k,l)
fxyz[l][k][j][0] = x component of complex force/charge
fxyz[l][k][j][1] = y component of complex force/charge
fxyz[l][k][j][2] = z component of complex force/charge
all for fourier mode (j,k,l)
cimag(ffc[l][k][j]) = finite-size particle shape factor s
for fourier mode (j,k,l)
creal(ffc[l][k][j]) = potential green's function g
for fourier mode (j,k,l)
ax/ay/az = half-width of particle in x/y/z direction
affp = normalization constant = nx*ny*nz/np,
where np=number of particles
electric field energy is also calculated, using
we = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*
|q[kz][ky][kx]*s[kz][ky][kx]|**2)
nx/ny/nz = system length in x/y/z direction
nxvh = first dimension of field arrays, must be >= nxh
nyv = second dimension of field arrays, must be >= ny
nzv = third dimension of field arrays, must be >= nz
nxhd = first dimension of form factor array, must be >= nxh
nyhd = second dimension of form factor array, must be >= nyh
nzhd = third dimension of form factor array, must be >= nzh
requires KNC, q, fxy, ffc need to be 64 byte aligned
nxhd, nxvh need to be a multiple of 8
fxyz needs to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj;
int nxyhd, nxvyh;
float dnx, dny, dnz, dkx, dky, dkz, at1, at2, at3, at4, at5, at6;
float complex zero, zt1, zt2;
double wp, sum1, sum2;
__m512i v_j, v_it, v_perm;
__m512 v_dnx, v_dny, v_dnz, v_dky, v_dkz, v_at1, v_at2, v_at3, v_at4;
__m512 v_zero, v_zt1, v_zt2, v_zt3, v_zt4;
__m512 a, b, c, d, e, f, g, h;
__m512d v_wp, v_d;
__attribute__((aligned(64))) double dd[8];
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 8*(nxh/8);
itn = 1 > nxhs ? 1 : nxhs;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
zero = 0.0 + 0.0*_Complex_I;
v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0);
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
v_perm = _mm512_set_epi32(15,14,11,10,7,6,3,2,13,12,9,8,5,4,1,0);
if (isign != 0)
goto L40;
/* prepare form factor array */
for (l = 0; l < nzh; l++) {
dkz = dnz*(float) l;
ll = nxyhd*l;
at1 = dkz*dkz;
at2 = pow((dkz*az),2);
for (k = 0; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
at3 = dky*dky + at1;
at4 = pow((dky*ay),2) + at2;
for (j = 0; j < nxh; j++) {
dkx = dnx*(float) j;
at5 = dkx*dkx + at3;
at6 = exp(-0.5*(pow((dkx*ax),2) + at4));
if (at5==0.0) {
ffc[j+kk+ll] = affp + 1.0*_Complex_I;
}
else {
ffc[j+kk+ll] = (affp*at6/at5) + at6*_Complex_I;
}
}
}
}
return;
/* calculate force/charge and sum field energy */
L40: sum1 = 0.0;
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,wp, \
v_it,v_dky,v_dkz,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,a,b, \
c,d,e,f,g,h,v_d,v_wp,dd) \
reduction(+:sum1)
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(
_mm512_set1_epi32(l),_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
wp = 0.0;
v_wp = _mm512_setzero_pd();
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); */
v_at1 = _mm512_load_ps((float *)&ffc[j+kk+ll]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* at3 = dky*at1; */
v_at3 = _mm512_mul_ps(v_dky,v_at1);
/* at4 = dkz*at1; */
v_at4 = _mm512_mul_ps(v_dkz,v_at1);
/* zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+kj+lj]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+k1+lj]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+kj+lj)] = at2*zt1; */
/* fxyz[1+4*(j+kj+lj)] = at3*zt1; */
/* fxyz[2+4*(j+kj+lj)] = at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = _mm512_mul_ps(v_at3,v_zt1);
c = _mm512_mul_ps(v_at4,v_zt1);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+kj+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+kj+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+kj+lj)],d);
/* fxyz[4*(j+k1+lj)] = at2*zt2; */
/* fxyz[1+4*(j+k1+lj)] = -at3*zt2; */
/* fxyz[2+4*(j+k1+lj)] = at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2));
c = _mm512_mul_ps(v_at4,v_zt2);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],d);
/* wp += at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) */
/* + q[j+k1+lj]*conjf(q[j+k1+lj])); */
v_zt3 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_mul_ps(v_at1,v_zt3);
/* zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+kj+l1]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+k1+l1]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+kj+l1)] = at2*zt1; */
/* fxyz[1+4*(j+kj+l1)] = at3*zt1; */
/* fxyz[2+4*(j+kj+l1)] = -at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = _mm512_mul_ps(v_at3,v_zt1);
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt1));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+kj+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+kj+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+kj+l1)],d);
/* fxyz[4*(j+k1+l1)] = at2*zt2; */
/* fxyz[1+4*(j+k1+l1)] = -at3*zt2; */
/* fxyz[2+4*(j+k1+l1)] = -at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2));
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],d);
/* wp += at1*(q[j+kj+l1]*conjf(q[j+kj+l1]) */
/* + q[j+k1+l1]*conjf(q[j+k1+l1])); */
v_zt4 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt4 = _mm512_add_ps(v_zt4,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_at1,v_zt4));
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
at4 = dkz*at1;
zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I;
zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I;
fxyz[4*(j+kj+lj)] = at2*zt1;
fxyz[1+4*(j+kj+lj)] = at3*zt1;
fxyz[2+4*(j+kj+lj)] = at4*zt1;
fxyz[4*(j+k1+lj)] = at2*zt2;
fxyz[1+4*(j+k1+lj)] = -at3*zt2;
fxyz[2+4*(j+k1+lj)] = at4*zt2;
zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I;
zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I;
fxyz[4*(j+kj+l1)] = at2*zt1;
fxyz[1+4*(j+kj+l1)] = at3*zt1;
fxyz[2+4*(j+kj+l1)] = -at4*zt1;
fxyz[4*(j+k1+l1)] = at2*zt2;
fxyz[1+4*(j+k1+l1)] = -at3*zt2;
fxyz[2+4*(j+k1+l1)] = -at4*zt2;
at1 = at1*(q[j+kj+lj]*conjf(q[j+kj+lj])
+ q[j+k1+lj]*conjf(q[j+k1+lj])
+ q[j+kj+l1]*conjf(q[j+kj+l1])
+ q[j+k1+l1]*conjf(q[j+k1+l1]));
wp += (double) at1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = crealf(ffc[kk+ll])*cimagf(ffc[kk+ll]);
at3 = at1*dny*(float) k;
at4 = dkz*at1;
zt1 = cimagf(q[kj+lj]) - crealf(q[kj+lj])*_Complex_I;
zt2 = cimagf(q[kj+l1]) - crealf(q[kj+l1])*_Complex_I;
fxyz[4*(kj+lj)] = zero;
fxyz[1+4*(kj+lj)] = at3*zt1;
fxyz[2+4*(kj+lj)] = at4*zt1;
fxyz[4*(k1+lj)] = zero;
fxyz[1+4*(k1+lj)] = zero;
fxyz[2+4*(k1+lj)] = zero;
fxyz[4*(kj+l1)] = zero;
fxyz[1+4*(kj+l1)] = at3*zt2;
fxyz[2+4*(kj+l1)] = -at4*zt2;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[kj+lj]*conjf(q[kj+lj])
+ q[kj+l1]*conjf(q[kj+l1]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); */
v_at1 = _mm512_load_ps((float *)&ffc[j+ll]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* at4 = dkz*at1; */
v_at4 = _mm512_mul_ps(v_dkz,v_at1);
/* zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+lj]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+l1]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+lj)] = at2*zt1; */
/* fxyz[1+4*(j+lj)] = zero; */
/* fxyz[2+4*(j+lj)] = at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = v_zero;
c = _mm512_mul_ps(v_at4,v_zt1);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,
78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,
78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),
b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g,
177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h,
177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+lj)],d);
/* fxyz[4*(j+k1+lj)] = zero; */
/* fxyz[1+4*(j+k1+lj)] = zero; */
/* fxyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],v_zero);
/* fxyz[4*(j+l1)] = at2*zt2; */
/* fxyz[1+4*(j+l1)] = zero; */
/* fxyz[2+4*(j+l1)] = -at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = v_zero;
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,
78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,
78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),
b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g,
177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h,
177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+l1)],d);
/* fxyz[4*(j+k1+l1)] = zero; */
/* fxyz[1+4*(j+k1+l1)] = zero; */
/* fxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero);
/* wp += at1*(q[j+lj]*conjf(q[j+lj]) */
/* + q[j+l1]*conjf(q[j+l1])); */
v_zt3 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_mul_ps(v_at1,v_zt3);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]);
at2 = at1*dnx*(float) j;
at4 = dkz*at1;
zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I;
zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I;
fxyz[4*(j+lj)] = at2*zt1;
fxyz[1+4*(j+lj)] = zero;
fxyz[2+4*(j+lj)] = at4*zt1;
fxyz[4*(j+k1+lj)] = zero;
fxyz[1+4*(j+k1+lj)] = zero;
fxyz[2+4*(j+k1+lj)] = zero;
fxyz[4*(j+l1)] = at2*zt2;
fxyz[1+4*(j+l1)] = zero;
fxyz[2+4*(j+l1)] = -at4*zt2;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j+lj]*conjf(q[j+lj])
+ q[j+l1]*conjf(q[j+l1]));
wp += (double) at1;
}
/* mode numbers kx = 0, nx/2 */
at1 = crealf(ffc[ll])*cimagf(ffc[ll]);
at4 = dkz*at1;
zt1 = cimagf(q[lj]) - crealf(q[lj])*_Complex_I;
fxyz[4*lj] = zero;
fxyz[1+4*lj] = zero;
fxyz[2+4*lj] = at4*zt1;
fxyz[4*(k1+lj)] = zero;
fxyz[1+4*(k1+lj)] = zero;
fxyz[2+4*(k1+lj)] = zero;
fxyz[4*l1] = zero;
fxyz[1+4*l1] = zero;
fxyz[2+4*l1] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[lj]*conjf(q[lj]));
wp += (double) at1;
/* sum1 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum1 += (wp + dd[0]);
}
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
sum2 = 0.0;
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \
reduction(+:sum2)
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
wp = 0.0;
for (j = 1; j < nxh; j++) {
at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I;
zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I;
fxyz[4*(j+kj)] = at2*zt1;
fxyz[1+4*(j+kj)] = at3*zt1;
fxyz[2+4*(j+kj)] = zero;
fxyz[4*(j+k1)] = at2*zt2;
fxyz[1+4*(j+k1)] = -at3*zt2;
fxyz[2+4*(j+k1)] = zero;
fxyz[4*(j+kj+l1)] = zero;
fxyz[1+4*(j+kj+l1)] = zero;
fxyz[2+4*(j+kj+l1)] = zero;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1]));
wp += (double) at1;
}
sum2 += wp;
}
/* mode numbers kx = 0, nx/2 */
wp = 0.0;
v_wp = _mm512_setzero_pd();
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = crealf(ffc[kk])*cimagf(ffc[kk]);
at3 = at1*dny*(float) k;
zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I;
fxyz[4*kj] = zero;
fxyz[1+4*kj] = at3*zt1;
fxyz[2+4*kj] = zero;
fxyz[4*k1] = zero;
fxyz[1+4*k1] = zero;
fxyz[2+4*k1] = zero;
fxyz[4*(kj+l1)] = zero;
fxyz[1+4*(kj+l1)] = zero;
fxyz[2+4*(kj+l1)] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[kj]*conjf(q[kj]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j])*cimagf(ffc[j]); */
v_at1 = _mm512_load_ps((float *)&ffc[j]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),v_zero,
v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),v_zero);
}
/* fxyz[4*j] = at2*zt1; */
/* fxyz[1+4*j] = zero; */
/* fxyz[2+4*j] = zero; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = v_zero;
c = v_zero;
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),v_zero,
177);
b = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),v_zero,
177);
d = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*j],a);
_mm512_store_ps((float *)&fxyz[8+4*j],b);
_mm512_store_ps((float *)&fxyz[16+4*j],c);
_mm512_store_ps((float *)&fxyz[24+4*j],d);
/* fxyz[4*(j+k1)] = zero; */
/* fxyz[1+4*(j+k1)] = zero; */
/* fxyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1)],v_zero);
/* fxyz[4*(j+l1)] = zero; */
/* fxyz[1+4*(j+l1)] = zero; */
/* fxyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+l1)],v_zero);
/* fxyz[4*(j+k1+l1)] = zero; */
/* fxyz[1+4*(j+k1+l1)] = zero; */
/* fxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero);
/* wp += at1*(q[j]*conjf(q[j])); */
v_zt3 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_zt1,v_zt1));
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j])*cimagf(ffc[j]);
at2 = at1*dnx*(float) j;
zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I;
fxyz[4*j] = at2*zt1;
fxyz[1+4*j] = zero;
fxyz[2+4*j] = zero;
fxyz[4*(j+k1)] = zero;
fxyz[1+4*(j+k1)] = zero;
fxyz[2+4*(j+k1)] = zero;
fxyz[4*(j+l1)] = zero;
fxyz[1+4*(j+l1)] = zero;
fxyz[2+4*(j+l1)] = zero;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j]*conjf(q[j]));
wp += (double) at1;
}
fxyz[0] = zero;
fxyz[1] = zero;
fxyz[2] = zero;
fxyz[4*k1] = zero;
fxyz[1+4*k1] = zero;
fxyz[2+4*k1] = zero;
fxyz[4*l1] = zero;
fxyz[1+4*l1] = zero;
fxyz[2+4*l1] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
/* sum2 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (wp + dd[0]);
/* *we = wp*((float) nx)*((float) ny)*((float) nz); */
*we = (sum1 + sum2)*((float) nx)*((float) ny)*((float) nz);
return;
}
/*--------------------------------------------------------------------*/
void cknccuperp3(float complex cu[], int nx, int ny, int nz, int nxvh,
int nyv, int nzv) {
/* this subroutine calculates the transverse current in fourier space
input: all, output: cu
approximate flop count is:
100*nxc*nyc*nzc + 36*(nxc*nyc + nxc*nzc + nyc*nzc)
and (nx/2)*nyc*nzc divides
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
the transverse current is calculated using the equation:
cux[kz][ky][kx] = cux[kz][ky][kx]
- kx*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
+ kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
cuy([kz][ky][kx] = cuy[kz][ky][kx]
- ky*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
+ kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
cuz[kz][ky][kx] = cuz[kz][ky][kx]
- kz*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
+ kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
j,k,l = fourier mode numbers, except for
cux(kx=pi) = cuy(kx=pi) = cuz(kx=pi) = 0,
cux(ky=pi) = cuy(ky=pi) = cuz(ky=pi) = 0,
cux(kz=pi) = cuy(kz=pi) = cuz(kz=pi) = 0,
cux(kx=0,ky=0,kz=0) = cuy(kx=0,ky=0,kz=0) = cuz(kx=0,ky=0,kz=0) = 0.
cu[l][k][j][i] = complex current density for fourier mode (j,k,l)
nx/ny/nz = system length in x/y/z direction
nxvh = second dimension of field arrays, must be >= nxh
nyv = third dimension of field arrays, must be >= ny
nzv = fourth dimension of field arrays, must be >= nz
requires KNC, cu need to be 64 byte aligned
nxhd needs to be a multiple of 8
(NOTE(review): this routine takes no nxhd argument - the nxhd line
above looks copied from sibling routines; confirm)
nxvh needs to be a multiple of 2
cu needs to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kj, lj, nxvyh;
float dnx, dny, dnz, dkx, dky, dkz, dky2, dkz2, dkyz2, at1;
float complex zero, zt1;
__m512i v_j, v_it;
__m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz, v_dkz2, v_dkyz2;
__m512 v_dk, v_at1, v_zt1, v_zt2, v_zero, v_one, v_at, v_as;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 2*(nxh/2);
itn = 1 > nxhs ? 1 : nxhs;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
zero = 0.0 + 0.0*_Complex_I;
/* v_j holds per-lane j offsets: the low 8 lanes belong to grid point */
/* j, the high 8 lanes to grid point j+1 (two 4-component complex    */
/* grid points per 512-bit register)                                 */
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
v_one = _mm512_set1_ps(1.0f);
/* SIMD layout note: cu stores 4 complex components (8 floats) per   */
/* grid point, so one register holds 2 grid points; the int2mask    */
/* constants select component slots within that layout:             */
/* 771=0x0303 -> x, 3084=0x0c0c -> y, 12336=0x3030 -> z,            */
/* 15420=0x3c3c -> y and z, 255=0x00ff -> all of the first grid     */
/* point (used to zero the kx = 0 mode)                             */
/* calculate transverse part of current */
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l),
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
dkz2 = dkz*dkz;
v_dkz2 = _mm512_set1_ps(dkz2);
/* add kz to gradient operator */
v_dk = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkz);
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kj = nxvh*k;
k1 = nxvh*ny - kj;
dkyz2 = dky*dky + dkz2;
v_dkyz2 = _mm512_fmadd_ps(v_dky,v_dky,v_dkz2);
/* add ky to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(3084),v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dkyz2); */
v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkyz2);
v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+kj+lj)] + dky*cu[1+4*(j+kj+lj)] */
/*     + dkz*cu[2+4*(j+kj+lj)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]);
v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
/* reduce k.cu: shuffle 78 swaps 64-bit pairs, permute 177 swaps */
/* adjacent 128-bit lanes, so each lane ends with the dot product */
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+kj+lj)] -= dkx*zt1; */
/* cu[1+4*(j+kj+lj)] -= dky*zt1; */
/* cu[2+4*(j+kj+lj)] -= dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+kj+lj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1+lj)] - dky*cu[1+4*(j+k1+lj)] */
/*     + dkz*cu[2+4*(j+k1+lj)]); */
/* v_as = gradient operator with the ky slot sign-flipped */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(3084),v_zero,
v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+k1+lj)] -= dkx*zt1; */
/* cu[1+4*(j+k1+lj)] += dky*zt1; */
/* cu[2+4*(j+k1+lj)] -= dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+k1+lj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+kj+l1)] + dky*cu[1+4*(j+kj+l1)] */
/*     - dkz*cu[2+4*(j+kj+l1)]); */
/* v_as = gradient operator with the kz slot sign-flipped */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(12336),
v_zero,v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+kj+l1)] -= dkx*zt1; */
/* cu[1+4*(j+kj+l1)] -= dky*zt1; */
/* cu[2+4*(j+kj+l1)] += dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+kj+l1)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1+l1)] - dky*cu[1+4*(j+k1+l1)] */
/*     - dkz*cu[2+4*(j+k1+l1)]); */
/* v_as = gradient operator with ky and kz slots sign-flipped */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(15420),
v_zero,v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+k1+l1)] -= dkx*zt1; */
/* cu[1+4*(j+k1+l1)] += dky*zt1; */
/* cu[2+4*(j+k1+l1)] += dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zt2);
}
/* loop over remaining elements (scalar tail of the vector loop) */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
at1 = 1.0/(dkx*dkx + dkyz2);
zt1 = at1*(dkx*cu[4*(j+kj+lj)] + dky*cu[1+4*(j+kj+lj)]
+ dkz*cu[2+4*(j+kj+lj)]);
cu[4*(j+kj+lj)] -= dkx*zt1;
cu[1+4*(j+kj+lj)] -= dky*zt1;
cu[2+4*(j+kj+lj)] -= dkz*zt1;
zt1 = at1*(dkx*cu[4*(j+k1+lj)] - dky*cu[1+4*(j+k1+lj)]
+ dkz*cu[2+4*(j+k1+lj)]);
cu[4*(j+k1+lj)] -= dkx*zt1;
cu[1+4*(j+k1+lj)] += dky*zt1;
cu[2+4*(j+k1+lj)] -= dkz*zt1;
zt1 = at1*(dkx*cu[4*(j+kj+l1)] + dky*cu[1+4*(j+kj+l1)]
- dkz*cu[2+4*(j+kj+l1)]);
cu[4*(j+kj+l1)] -= dkx*zt1;
cu[1+4*(j+kj+l1)] -= dky*zt1;
cu[2+4*(j+kj+l1)] += dkz*zt1;
zt1 = at1*(dkx*cu[4*(j+k1+l1)] - dky*cu[1+4*(j+k1+l1)]
- dkz*cu[2+4*(j+k1+l1)]);
cu[4*(j+k1+l1)] -= dkx*zt1;
cu[1+4*(j+k1+l1)] += dky*zt1;
cu[2+4*(j+k1+l1)] += dkz*zt1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kj = nxvh*k;
k1 = nxvh*ny - kj;
dky = dny*(float) k;
at1 = 1.0/(dky*dky + dkz2);
zt1 = at1*(dky*cu[1+4*(kj+lj)] + dkz*cu[2+4*(kj+lj)]);
cu[1+4*(kj+lj)] -= dky*zt1;
cu[2+4*(kj+lj)] -= dkz*zt1;
cu[4*(k1+lj)] = zero;
cu[1+4*(k1+lj)] = zero;
cu[2+4*(k1+lj)] = zero;
zt1 = at1*(dky*cu[1+4*(kj+l1)] - dkz*cu[2+4*(kj+l1)]);
cu[1+4*(kj+l1)] -= dky*zt1;
cu[2+4*(kj+l1)] += dkz*zt1;
cu[4*(k1+l1)] = zero;
cu[1+4*(k1+l1)] = zero;
cu[2+4*(k1+l1)] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* add ky to gradient operator */
/* (ky = 0 here, so clear the ky slot of v_dk) */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(3084),v_zero);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dkz2); */
v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkz2);
v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+lj)] + dkz*cu[2+4*(j+lj)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+lj)]);
v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+lj)] -= dkx*zt1; */
/* cu[2+4*(j+lj)] -= dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+lj)],v_zt2);
/* cu[4*(j+k1+lj)] = zero; */
/* cu[1+4*(j+k1+lj)] = zero; */
/* cu[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1+lj)],v_zero);
/* zt1 = at1*(dkx*cu[4*(j+l1)] - dkz*cu[2+4*(j+l1)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+l1)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(12336),
v_zero,v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+l1)] -= dkx*zt1; */
/* cu[2+4*(j+l1)] += dkz*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+l1)],v_zt2);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements (scalar tail of the vector loop) */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
at1 = 1.0/(dkx*dkx + dkz2);
zt1 = at1*(dkx*cu[4*(j+lj)] + dkz*cu[2+4*(j+lj)]);
cu[4*(j+lj)] -= dkx*zt1;
cu[2+4*(j+lj)] -= dkz*zt1;
cu[4*(j+k1+lj)] = zero;
cu[1+4*(j+k1+lj)] = zero;
cu[2+4*(j+k1+lj)] = zero;
zt1 = at1*(dkx*cu[4*(j+l1)] - dkz*cu[2+4*(j+l1)]);
cu[4*(j+l1)] -= dkx*zt1;
cu[2+4*(j+l1)] += dkz*zt1;
cu[4*(j+k1+l1)] = zero;
cu[1+4*(j+k1+l1)] = zero;
cu[2+4*(j+k1+l1)] = zero;
}
/* mode numbers kx = 0, nx/2 */
/* for k=(0,0,kz) the projection removes only the z component */
cu[2+4*lj] = zero;
cu[4*(k1+lj)] = zero;
cu[1+4*(k1+lj)] = zero;
cu[2+4*(k1+lj)] = zero;
cu[4*l1] = zero;
cu[1+4*l1] = zero;
cu[2+4*l1] = zero;
cu[4*(k1+l1)] = zero;
cu[1+4*(k1+l1)] = zero;
cu[2+4*(k1+l1)] = zero;
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kj = nxvh*k;
k1 = nxvh*ny - kj;
dky2 = dky*dky;
v_dkyz2 = _mm512_mul_ps(v_dky,v_dky);
/* add ky to gradient operator */
v_dk = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dky2); */
v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkyz2);
v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+kj)] + dky*cu[1+4*(j+kj)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj)]);
v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
}
/* cu[4*(j+kj)] -= dkx*zt1; */
/* cu[1+4*(j+kj)] -= dky*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+kj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1)]- dky*cu[1+4*(j+k1)]); */
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1)]);
v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(3084),v_zero,
v_dk);
v_zt1 = _mm512_mul_ps(v_as,v_zt2);
v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
v_zt1 = _mm512_add_ps(v_at,v_zt1);
v_at = _mm512_permute4f128_ps(v_zt1,177);
v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);;
}
/* cu[4*(j+k1)] -= dkx*zt1; */
/* cu[1+4*(j+k1)] += dky*zt1; */
v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
_mm512_store_ps((float *)&cu[4*(j+k1)],v_zt2);
/* cu[4*(j+kj+l1)] = zero; */
/* cu[1+4*(j+kj+l1)] = zero; */
/* cu[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+kj+l1)],v_zero);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements (scalar tail of the vector loop) */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
at1 = 1.0/(dkx*dkx + dky2);
zt1 = at1*(dkx*cu[4*(j+kj)] + dky*cu[1+4*(j+kj)]);
cu[4*(j+kj)] -= dkx*zt1;
cu[1+4*(j+kj)] -= dky*zt1;
zt1 = at1*(dkx*cu[4*(j+k1)]- dky*cu[1+4*(j+k1)]);
cu[4*(j+k1)] -= dkx*zt1;
cu[1+4*(j+k1)] += dky*zt1;
cu[4*(j+kj+l1)] = zero;
cu[1+4*(j+kj+l1)] = zero;
cu[2+4*(j+kj+l1)] = zero;
cu[4*(j+k1+l1)] = zero;
cu[1+4*(j+k1+l1)] = zero;
cu[2+4*(j+k1+l1)] = zero;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kj = nxvh*k;
k1 = nxvh*ny - kj;
cu[1+4*kj] = zero;
cu[4*k1] = zero;
cu[1+4*k1] = zero;
cu[2+4*k1] = zero;
cu[4*(kj+l1)] = zero;
cu[1+4*(kj+l1)] = zero;
cu[2+4*(kj+l1)] = zero;
cu[4*(k1+l1)] = zero;
cu[1+4*(k1+l1)] = zero;
cu[2+4*(k1+l1)] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
v_zt2 = _mm512_load_ps((float *)&cu[4*j]);
/* zero out kx = 0 mode */
if (j==0) {
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(255),v_zero);
}
/* cu[4*j] = zero; */
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(771),v_zero);
_mm512_store_ps((float *)&cu[4*j],v_zt2);
/* cu[4*(j+k1)] = zero; */
/* cu[1+4*(j+k1)] = zero; */
/* cu[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1)],v_zero);
/* cu[4*(j+l1)] = zero; */
/* cu[1+4*(j+l1)] = zero; */
/* cu[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+l1)],v_zero);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements (scalar tail of the vector loop) */
for (j = itn; j < nxh; j++) {
cu[4*j] = zero;
cu[4*(j+k1)] = zero;
cu[1+4*(j+k1)] = zero;
cu[2+4*(j+k1)] = zero;
cu[4*(j+l1)] = zero;
cu[1+4*(j+l1)] = zero;
cu[2+4*(j+l1)] = zero;
cu[4*(j+k1+l1)] = zero;
cu[1+4*(j+k1+l1)] = zero;
cu[2+4*(j+k1+l1)] = zero;
}
/* zero the kx=0/nx/2, ky=0/ny/2, kz=0/nz/2 corner modes */
cu[0] = zero;
cu[1] = zero;
cu[2] = zero;
cu[4*k1] = zero;
cu[1+4*k1] = zero;
cu[2+4*k1] = zero;
cu[4*l1] = zero;
cu[1+4*l1] = zero;
cu[2+4*l1] = zero;
cu[4*(k1+l1)] = zero;
cu[1+4*(k1+l1)] = zero;
cu[2+4*(k1+l1)] = zero;
return;
}
/*--------------------------------------------------------------------*/
void ckncibpois33(float complex cu[], float complex bxyz[],
float complex ffc[], float ci, float *wm, int nx,
int ny, int nz, int nxvh, int nyv, int nzv, int nxhd,
int nyhd, int nzhd) {
/* this subroutine solves 3d poisson's equation in fourier space for
magnetic field with periodic boundary conditions.
input: cu,ffc,ci,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
output: bxyz, wm
approximate flop count is:
193*nxc*nyc*nzc + 84*(nxc*nyc + nxc*nzc + nyc*nzc)
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
the magnetic field is calculated using the equations:
bx[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]*
(ky*cuz[kz][ky][kx]-kz*cuy[kz][ky][kx]),
by[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]*
(kz*cux[kz][ky][kx]-kx*cuz[kz][ky][kx]),
bz[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]*
(kx*cuy[kz][ky][kx]-ky*cux[kz][ky][kx]),
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
j,k,l = fourier mode numbers,
g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s(kx,ky,kz),
s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for
bx(kx=pi) = by(kx=pi) = bz(kx=pi) = 0,
bx(ky=pi) = by(ky=pi) = bz(ky=pi) = 0,
bx(kz=pi) = by(kz=pi) = bz(kz=pi) = 0,
bx(kx=0,ky=0,kz=0) = by(kx=0,ky=0,kz=0) = bz(kx=0,ky=0,kz=0) = 0.
cu[l][k][j][i] = complex current density for fourier mode (j,k,l)
bxyz[l][k][j][i] = i component of complex magnetic field
all for fourier mode (j,k,l)
aimag(ffc(j,k,l)) = finite-size particle shape factor s
for fourier mode (j,k,l)
real(ffc(j,k,l)) = potential green's function g
for fourier mode (j,k,l)
ci = reciprocal of velocity of light
magnetic field energy is also calculated, using
wm = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*ci*ci
|cu[kz][ky][kx]*s[kz][ky][kx]|**2)
this expression is valid only if the current is divergence-free
nx/ny/nz = system length in x/y/z direction
nxvh = second dimension of field arrays, must be >= nxh
nyv = third dimension of field arrays, must be >= ny
nzv = fourth dimension of field arrays, must be >= nz
nxhd = dimension of form factor array, must be >= nxh
nyhd = second dimension of form factor array, must be >= nyh
nzhd = third dimension of form factor array, must be >= nzh
requires KNC, cu, bxyz, ffc need to be 64 byte aligned
nxhd needs to be a multiple of 8
nxvh needs to be a multiple of 2
cu, bxyz need to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj;
int nxyhd, nxvyh;
float dnx, dny, dnz, dky, dkz, ci2, at1, at2, at3, at4;
float complex zero, zt1, zt2, zt3;
double wp, d0;
__m512i v_j, v_it, v_n, v_m;
__m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz, v_ci2;
__m512 v_dk1, v_dk2, v_at1, v_at2, v_at3, v_at4, v_zero;
__m512 v_zt1, v_zt2, v_zt3, v_zt4;
__m512d v_wp, v_d;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 2*(nxh/2);
itn = 1 > nxhs ? 1 : nxhs;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
zero = 0.0 + 0.0*_Complex_I;
ci2 = ci*ci;
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
v_n = _mm512_set_epi32(15,14,11,10,9,8,13,12,7,6,3,2,1,0,5,4);
v_m = _mm512_set_epi32(15,14,9,8,13,12,11,10,7,6,1,0,5,4,3,2);
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
v_ci2 = _mm512_set1_ps(ci2);
/* calculate magnetic field and sum field energy */
wp = 0.0;
v_wp = _mm512_set1_pd(0.0);
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l),
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
/* add kz to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dkz);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkz);
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),
v_dky);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),
v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = ci2*crealf(ffc[j+kk+ll]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(43690),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_ci2,v_at1);
/* at2 = at1*dnx*(float) j; */
/* at3 = dky*at1; */
/* at4 = dkz*at1; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* normalize curl operators */
v_at2 = _mm512_mul_ps(v_at1,v_dk1);
v_at3 = _mm512_mul_ps(v_at1,v_dk2);
/* at1 = at1*cimagf(ffc[j+kk+ll]); */
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(21845),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_at1,v_at4);
/* zt1 = -cimagf(cu[2+4*(j+kj+lj)]) */
/*                + crealf(cu[2+4*(j+kj+lj)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+kj+lj)]) */
/* + crealf(cu[1+4*(j+kj+lj)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+kj+lj)]) */
/* + crealf(cu[4*(j+kj+lj)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),
v_zero,v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* bxyz[4*(j+kj+lj)] = at3*zt1 - at4*zt2; */
/* bxyz[1+4*(j+kj+lj)] = at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+kj+lj)] = at2*zt2 - at3*zt3; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+kj+lj)],v_zt1);
/* wp += at1*(cu[4*(j+kj+lj)]*conjf(cu[4*(j+kj+lj)]) */
/* + cu[1+4*(j+kj+lj)]*conjf(cu[1+4*(j+kj+lj)]) */
/* + cu[2+4*(j+kj+lj)]*conjf(cu[2+4*(j+kj+lj)])); */
v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3));
/* zt1 = -cimagf(cu[2+4*(j+k1+lj)]) */
/* + crealf(cu[2+4*(j+k1+lj)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+k1+lj)]) */
/* + crealf(cu[1+4*(j+k1+lj)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+k1+lj)]) */
/* + crealf(cu[4*(j+k1+lj)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),
v_zero,v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(12336),
v_zero,v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(771),
v_zero,v_at3);
/* bxyz[4*(j+k1+lj)] = -at3*zt1 - at4*zt2; */
/* bxyz[1+4*(j+k1+lj)] = at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+k1+lj)] = at2*zt2 + at3*zt3; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zt1);
/* wp += at1*(cu[4*(j+k1+lj)]*conjf(cu[4*(j+k1+lj)]) */
/* + cu[1+4*(j+k1+lj)]*conjf(cu[1+4*(j+k1+lj)]) */
/* + cu[2+4*(j+k1+lj)]*conjf(cu[2+4*(j+k1+lj)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* zt1 = -cimagf(cu[2+4*(j+kj+l1)]) */
/* + crealf(cu[2+4*(j+kj+l1)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+kj+l1)]) */
/* + crealf(cu[1+4*(j+kj+l1)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+kj+l1)]) */
/* + crealf(cu[4*(j+kj+l1)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),
v_zero,v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(771),
v_zero,v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3084),
v_zero,v_at3);
/* bxyz[4*(j+kj+l1)] = at3*zt1 + at4*zt2; */
/* bxyz[1+4*(j+kj+l1)] = -at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+kj+l1)] = at2*zt2 - at3*zt3; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zt1);
/* wp += at1*(cu[4*(j+kj+l1)]*conjf(cu[4*(j+kj+l1)]) */
/* + cu[1+4*(j+kj+l1)]*conjf(cu[1+4*(j+kj+l1)]) */
/* + cu[2+4*(j+kj+l1)]*conjf(cu[2+4*(j+kj+l1)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* zt1 = -cimagf(cu[2+4*(j+k1+l1)]) */
/* + crealf(cu[2+4*(j+k1+l1)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+k1+l1)]) */
/* + crealf(cu[1+4*(j+k1+l1)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+k1+l1)]) */
/* + crealf(cu[4*(j+k1+l1)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),
v_zero,v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(13107),
v_zero,v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3855),
v_zero,v_at3);
/* bxyz[4*(j+k1+l1)] = -at3*zt1 + at4*zt2; */
/* bxyz[1+4*(j+k1+l1)] = -at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+k1+l1)] = at2*zt2 + at3*zt3; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zt1);
/* wp += at1*(cu[4*(j+k1+l1)]*conjf(cu[4*(j+k1+l1)]) */
/* + cu[1+4*(j+k1+l1)]*conjf(cu[1+4*(j+k1+l1)]) */
/* + cu[2+4*(j+k1+l1)]*conjf(cu[2+4*(j+k1+l1)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = ci2*crealf(ffc[j+kk+ll]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
at4 = dkz*at1;
at1 = at1*cimagf(ffc[j+kk+ll]);
zt1 = -cimagf(cu[2+4*(j+kj+lj)])
+ crealf(cu[2+4*(j+kj+lj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+kj+lj)])
+ crealf(cu[1+4*(j+kj+lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+kj+lj)])
+ crealf(cu[4*(j+kj+lj)])*_Complex_I;
bxyz[4*(j+kj+lj)] = at3*zt1 - at4*zt2;
bxyz[1+4*(j+kj+lj)] = at4*zt3 - at2*zt1;
bxyz[2+4*(j+kj+lj)] = at2*zt2 - at3*zt3;
zt1 = -cimagf(cu[2+4*(j+k1+lj)])
+ crealf(cu[2+4*(j+k1+lj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+k1+lj)])
+ crealf(cu[1+4*(j+k1+lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+k1+lj)])
+ crealf(cu[4*(j+k1+lj)])*_Complex_I;
bxyz[4*(j+k1+lj)] = -at3*zt1 - at4*zt2;
bxyz[1+4*(j+k1+lj)] = at4*zt3 - at2*zt1;
bxyz[2+4*(j+k1+lj)] = at2*zt2 + at3*zt3;
zt1 = -cimagf(cu[2+4*(j+kj+l1)])
+ crealf(cu[2+4*(j+kj+l1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+kj+l1)])
+ crealf(cu[1+4*(j+kj+l1)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+kj+l1)])
+ crealf(cu[4*(j+kj+l1)])*_Complex_I;
bxyz[4*(j+kj+l1)] = at3*zt1 + at4*zt2;
bxyz[1+4*(j+kj+l1)] = -at4*zt3 - at2*zt1;
bxyz[2+4*(j+kj+l1)] = at2*zt2 - at3*zt3;
zt1 = -cimagf(cu[2+4*(j+k1+l1)])
+ crealf(cu[2+4*(j+k1+l1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+k1+l1)])
+ crealf(cu[1+4*(j+k1+l1)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+k1+l1)])
+ crealf(cu[4*(j+k1+l1)])*_Complex_I;
bxyz[4*(j+k1+l1)] = -at3*zt1 + at4*zt2;
bxyz[1+4*(j+k1+l1)] = -at4*zt3 - at2*zt1;
bxyz[2+4*(j+k1+l1)] = at2*zt2 + at3*zt3;
at1 = at1*(cu[4*(j+kj+lj)]*conjf(cu[4*(j+kj+lj)])
+ cu[1+4*(j+kj+lj)]*conjf(cu[1+4*(j+kj+lj)])
+ cu[2+4*(j+kj+lj)]*conjf(cu[2+4*(j+kj+lj)])
+ cu[4*(j+k1+lj)]*conjf(cu[4*(j+k1+lj)])
+ cu[1+4*(j+k1+lj)]*conjf(cu[1+4*(j+k1+lj)])
+ cu[2+4*(j+k1+lj)]*conjf(cu[2+4*(j+k1+lj)])
+ cu[4*(j+kj+l1)]*conjf(cu[4*(j+kj+l1)])
+ cu[1+4*(j+kj+l1)]*conjf(cu[1+4*(j+kj+l1)])
+ cu[2+4*(j+kj+l1)]*conjf(cu[2+4*(j+kj+l1)])
+ cu[4*(j+k1+l1)]*conjf(cu[4*(j+k1+l1)])
+ cu[1+4*(j+k1+l1)]*conjf(cu[1+4*(j+k1+l1)])
+ cu[2+4*(j+k1+l1)]*conjf(cu[2+4*(j+k1+l1)]));
wp += (double) at1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = ci2*crealf(ffc[kk+ll]);
at3 = at1*dny*(float) k;
at4 = dkz*at1;
at1 = at1*cimagf(ffc[kk+ll]);
zt1 = -cimagf(cu[2+4*(kj+lj)])
+ crealf(cu[2+4*(kj+lj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(kj+lj)])
+ crealf(cu[1+4*(kj+lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(kj+lj)])
+ crealf(cu[4*(kj+lj)])*_Complex_I;
bxyz[4*(kj+lj)] = at3*zt1 - at4*zt2;
bxyz[1+4*(kj+lj)] = at4*zt3;
bxyz[2+4*(kj+lj)] = -at3*zt3;
bxyz[4*(k1+lj)] = zero;
bxyz[1+4*(k1+lj)] = zero;
bxyz[2+4*(k1+lj)] = zero;
zt1 = -cimagf(cu[2+4*(kj+l1)])
+ crealf(cu[2+4*(kj+l1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(kj+l1)])
+ crealf(cu[1+4*(kj+l1)])*_Complex_I;
zt3 = -cimagf(cu[4*(kj+l1)])
+ crealf(cu[4*(kj+l1)])*_Complex_I;
bxyz[4*(kj+l1)] = at3*zt1 + at4*zt2;
bxyz[1+4*(kj+l1)] = -at4*zt3;
bxyz[2+4*(kj+l1)] = -at3*zt3;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
at1 = at1*(cu[4*(kj+lj)]*conjf(cu[4*(kj+lj)])
+ cu[1+4*(kj+lj)]*conjf(cu[1+4*(kj+lj)])
+ cu[2+4*(kj+lj)]*conjf(cu[2+4*(kj+lj)])
+ cu[4*(kj+l1)]*conjf(cu[4*(kj+l1)])
+ cu[1+4*(kj+l1)]*conjf(cu[1+4*(kj+l1)])
+ cu[2+4*(kj+l1)]*conjf(cu[2+4*(kj+l1)]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),v_zero);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),v_zero);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = ci2*crealf(ffc[j+ll]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
(float *)&ffc[j+ll]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15),
(float *)&ffc[j+ll+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(43690),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_ci2,v_at1);
/* at2 = at1*dnx*(float) j; */
/* at4 = dkz*at1; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* normalize curl operators */
v_at2 = _mm512_mul_ps(v_at1,v_dk1);
v_at3 = _mm512_mul_ps(v_at1,v_dk2);
/* at1 = at1*cimagf(ffc[j+ll]); */
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(21845),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_at1,v_at4);
/* zt1 = -cimagf(cu[2+4*(j+lj)]) */
/* + crealf(cu[2+4*(j+lj)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+lj)]) */
/* + crealf(cu[1+4*(j+lj)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+lj)]) */
/* + crealf(cu[4*(j+lj)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* bxyz[4*(j+lj)] = -at4*zt2; */
/* bxyz[1+4*(j+lj)] = at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+lj)] = at2*zt2; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+lj)],v_zt1);
/* wp += at1*(cu[4*(j+lj)]*conjf(cu[4*(j+lj)]) */
/* + cu[1+4*(j+lj)]*conjf(cu[1+4*(j+lj)]) */
/* + cu[2+4*(j+lj)]*conjf(cu[2+4*(j+lj)]) */
v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3));
/* bxyz[4*(j+k1+lj)] = zero; */
/* bxyz[1+4*(j+k1+lj)] = zero; */
/* bxyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zero);
/* zt1 = -cimagf(cu[2+4*(j+l1)]) */
/* + crealf(cu[2+4*(j+l1)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+l1)]) */
/* + crealf(cu[1+4*(j+l1)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+l1)]) */
/* + crealf(cu[4*(j+l1)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(771),v_zero,
v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3084),v_zero,
v_at3);
/* bxyz[4*(j+l1)] = at4*zt2; */
/* bxyz[1+4*(j+l1)] = -at4*zt3 - at2*zt1; */
/* bxyz[2+4*(j+l1)] = at2*zt2; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zt1);
/* wp += at1*(cu[4*(j+l1)]*conjf(cu[4*(j+l1)]) */
/* + cu[1+4*(j+l1)]*conjf(cu[1+4*(j+l1)]) */
/* + cu[2+4*(j+l1)]*conjf(cu[2+4*(j+l1)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = ci2*crealf(ffc[j+ll]);
at2 = at1*dnx*(float) j;
at4 = dkz*at1;
at1 = at1*cimagf(ffc[j+ll]);
zt1 = -cimagf(cu[2+4*(j+lj)])
+ crealf(cu[2+4*(j+lj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+lj)])
+ crealf(cu[1+4*(j+lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+lj)])
+ crealf(cu[4*(j+lj)])*_Complex_I;
bxyz[4*(j+lj)] = -at4*zt2;
bxyz[1+4*(j+lj)] = at4*zt3 - at2*zt1;
bxyz[2+4*(j+lj)] = at2*zt2;
bxyz[4*(j+k1+lj)] = zero;
bxyz[1+4*(j+k1+lj)] = zero;
bxyz[2+4*(j+k1+lj)] = zero;
zt1 = -cimagf(cu[2+4*(j+l1)])
+ crealf(cu[2+4*(j+l1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+l1)])
+ crealf(cu[1+4*(j+l1)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+l1)])
+ crealf(cu[4*(j+l1)])*_Complex_I;
bxyz[4*(j+l1)] = at4*zt2;
bxyz[1+4*(j+l1)] = -at4*zt3 - at2*zt1;
bxyz[2+4*(j+l1)] = at2*zt2;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(cu[4*(j+lj)]*conjf(cu[4*(j+lj)])
+ cu[1+4*(j+lj)]*conjf(cu[1+4*(j+lj)])
+ cu[2+4*(j+lj)]*conjf(cu[2+4*(j+lj)])
+ cu[4*(j+l1)]*conjf(cu[4*(j+l1)])
+ cu[1+4*(j+l1)]*conjf(cu[1+4*(j+l1)])
+ cu[2+4*(j+l1)]*conjf(cu[2+4*(j+l1)]));
wp += (double) at1;
}
/* mode numbers kx = 0, nx/2 */
at1 = ci2*crealf(ffc[ll]);
at4 = dkz*at1;
at1 = at1*cimagf(ffc[ll]);
zt2 = -cimagf(cu[1+4*(lj)]) + crealf(cu[1+4*(lj)])*_Complex_I;
zt3 = -cimagf(cu[4*(lj)]) + crealf(cu[4*(lj)])*_Complex_I;
bxyz[4*lj] = -at4*zt2;
bxyz[1+4*lj] = at4*zt3;
bxyz[2+4*lj] = zero;
bxyz[4*(k1+lj)] = zero;
bxyz[1+4*(k1+lj)] = zero;
bxyz[2+4*(k1+lj)] = zero;
bxyz[4*l1] = zero;
bxyz[1+4*l1] = zero;
bxyz[2+4*l1] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
at1 = at1*(cu[4*lj]*conjf(cu[4*lj])
+ cu[1+4*lj]*conjf(cu[1+4*lj])
+ cu[2+4*lj]*conjf(cu[2+4*lj]));
wp += (double) at1;
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dky);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = ci2*crealf(ffc[j+kk]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
(float *)&ffc[j+kk]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15),
(float *)&ffc[j+kk+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(43690),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_ci2,v_at1);
/* at2 = at1*dnx*(float) j; */
/* at3 = dky*at1; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* normalize curl operators */
v_at2 = _mm512_mul_ps(v_at1,v_dk1);
v_at3 = _mm512_mul_ps(v_at1,v_dk2);
/* at1 = at1*cimagf(ffc[j+kk]); */
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(21845),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_at1,v_at4);
/* zt1 = -cimagf(cu[2+4*(j+kj)]) */
/* + crealf(cu[2+4*(j+kj)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+kj)]) */
/* + crealf(cu[1+4*(j+kj)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+kj)]) */
/* + crealf(cu[4*(j+kj)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* bxyz[4*(j+kj)] = at3*zt1; */
/* bxyz[1+4*(j+kj)] = -at2*zt1; */
/* bxyz[2+4*(j+kj)] = at2*zt2 - at3*zt3; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+kj)],v_zt1);
/* wp += at1*(cu[4*(j+kj)]*conjf(cu[4*(j+kj)]) */
/* + cu[1+4*(j+kj)]*conjf(cu[1+4*(j+kj)]) */
/* + cu[2+4*(j+kj)]*conjf(cu[2+4*(j+kj)])); */
v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3));
/* zt1 = -cimagf(cu[2+4*(j+k1)]) */
/* + crealf(cu[2+4*(j+k1)])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*(j+k1)]) */
/* + crealf(cu[1+4*(j+k1)])*_Complex_I; */
/* zt3 = -cimagf(cu[4*(j+k1)]) */
/* + crealf(cu[4*(j+k1)])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(12336),v_zero,
v_at2);
v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(771),v_zero,
v_at3);
/* bxyz[4*(j+k1)] = -at3*zt1; */
/* bxyz[1+4*(j+k1)] = -at2*zt1; */
/* bxyz[2+4*(j+k1)] = at2*zt2 + at3*zt3; */
v_zt1 = _mm512_mul_ps(v_zt1,v_zt3);
v_zt2 = _mm512_mul_ps(v_zt2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),
v_zero);
}
_mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zt1);
/* wp += at1*(cu[4*(j+k1)]*conjf(cu[4*(j+k1)]) */
/* + cu[1+4*(j+k1)]*conjf(cu[1+4*(j+k1)]) */
/* + cu[2+4*(j+k1)]*conjf(cu[2+4*(j+k1)])); */
v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3),v_zt4);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+kj+l1)] = zero; */
/* bxyz[1+4*(j+kj+l1)] = zero; */
/* bxyz[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zero);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = ci2*crealf(ffc[j+kk]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
at1 = at1*cimagf(ffc[j+kk]);
zt1 = -cimagf(cu[2+4*(j+kj)])
+ crealf(cu[2+4*(j+kj)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+kj)])
+ crealf(cu[1+4*(j+kj)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+kj)])
+ crealf(cu[4*(j+kj)])*_Complex_I;
bxyz[4*(j+kj)] = at3*zt1;
bxyz[1+4*(j+kj)] = -at2*zt1;
bxyz[2+4*(j+kj)] = at2*zt2 - at3*zt3;
zt1 = -cimagf(cu[2+4*(j+k1)])
+ crealf(cu[2+4*(j+k1)])*_Complex_I;
zt2 = -cimagf(cu[1+4*(j+k1)])
+ crealf(cu[1+4*(j+k1)])*_Complex_I;
zt3 = -cimagf(cu[4*(j+k1)])
+ crealf(cu[4*(j+k1)])*_Complex_I;
bxyz[4*(j+k1)] = -at3*zt1;
bxyz[1+4*(j+k1)] = -at2*zt1;
bxyz[2+4*(j+k1)] = at2*zt2 + at3*zt3;
bxyz[4*(j+kj+l1)] = zero;
bxyz[1+4*(j+kj+l1)] = zero;
bxyz[2+4*(j+kj+l1)] = zero;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(cu[4*(j+kj)]*conjf(cu[4*(j+kj)])
+ cu[1+4*(j+kj)]*conjf(cu[1+4*(j+kj)])
+ cu[2+4*(j+kj)]*conjf(cu[2+4*(j+kj)])
+ cu[4*(j+k1)]*conjf(cu[4*(j+k1)])
+ cu[1+4*(j+k1)]*conjf(cu[1+4*(j+k1)])
+ cu[2+4*(j+k1)]*conjf(cu[2+4*(j+k1)]));
wp += (double) at1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = ci2*crealf(ffc[kk]);
at3 = at1*dny*(float) k;
at1 = at1*cimagf(ffc[kk]);
zt1 = -cimagf(cu[2+4*(kj)]) + crealf(cu[2+4*(kj)])*_Complex_I;
zt3 = -cimagf(cu[4*(kj)]) + crealf(cu[4*(kj)])*_Complex_I;
bxyz[4*kj] = at3*zt1;
bxyz[1+4*kj] = zero;
bxyz[2+4*kj] = -at3*zt3;
bxyz[4*k1] = zero;
bxyz[1+4*k1] = zero;
bxyz[2+4*k1] = zero;
bxyz[4*(kj+l1)] = zero;
bxyz[1+4*(kj+l1)] = zero;
bxyz[2+4*(kj+l1)] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
at1 = at1*(cu[4*kj]*conjf(cu[4*kj])
+ cu[1+4*kj]*conjf(cu[1+4*kj])
+ cu[2+4*kj]*conjf(cu[2+4*kj]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = ci2*crealf(ffc[j]); */
v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
(float *)&ffc[j]);
v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15),
(float *)&ffc[j+8]);
v_at1 = _mm512_permute4f128_ps(v_at1,0);
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
_mm512_int2mask(13260),(__m512i)v_at1,78);
v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(43690),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_ci2,v_at1);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkx);
/* normalize curl operators */
v_at2 = _mm512_mul_ps(v_at1,v_dk1);
v_at3 = _mm512_mul_ps(v_at1,v_dk2);
/* at1 = at1*cimagf(ffc[j]); */
v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4,
_mm512_int2mask(21845),(__m512i)v_at4,177);
v_at1 = _mm512_mul_ps(v_at1,v_at4);
/* zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I; */
/* zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I; */
v_zt3 = _mm512_load_ps((float *)&cu[4*j]);
v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero,
v_zt3);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* bxyz[4*j] = zero; */
/* bxyz[1+4*j] = -at2*zt1; */
/* bxyz[2+4*j] = at2*zt2; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_sub_ps(v_zt1,v_zt2);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),v_zero);
v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),v_zero);
}
_mm512_store_ps((float *)&bxyz[4*j],v_zt1);
/* wp += at1*(cu[4*j]*conjf(cu[4*j]) */
/* + cu[1+4*j]*conjf(cu[1+4*j]) */
/* + cu[2+4*j]*conjf(cu[2+4*j])); */
v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero,
_mm512_int2mask(16191),v_zt3,v_zt3));
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+k1)] = zero; */
/* bxyz[1+4*(j+k1)] = zero; */
/* bxyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zero);
/* bxyz[4*(j+l1)] = zero; */
/* bxyz[1+4*(j+l1)] = zero; */
/* bxyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zero);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = ci2*crealf(ffc[j]);
at2 = at1*dnx*(float) j;
at1 = at1*cimagf(ffc[j]);
zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I;
zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I;
bxyz[4*j] = zero;
bxyz[1+4*j] = -at2*zt1;
bxyz[2+4*j] = at2*zt2;
bxyz[4*(j+k1)] = zero;
bxyz[1+4*(j+k1)] = zero;
bxyz[2+4*(j+k1)] = zero;
bxyz[4*(j+l1)] = zero;
bxyz[1+4*(j+l1)] = zero;
bxyz[2+4*(j+l1)] = zero;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(cu[4*j]*conjf(cu[4*j])
+ cu[1+4*j]*conjf(cu[1+4*j])
+ cu[2+4*j]*conjf(cu[2+4*j]));
wp += (double) at1;
}
bxyz[0] = zero;
bxyz[1] = zero;
bxyz[2] = zero;
bxyz[4*k1] = zero;
bxyz[1+4*k1] = zero;
bxyz[2+4*k1] = zero;
bxyz[4*l1] = zero;
bxyz[1+4*l1] = zero;
bxyz[2+4*l1] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
d0 = _mm512_reduce_add_pd(v_wp);
*wm = (wp + d0)*((float) nx)*((float) ny)*((float) nz);
return;
}
/*--------------------------------------------------------------------*/
void ckncmaxwel3(float complex exyz[], float complex bxyz[],
float complex cu[], float complex ffc[], float ci,
float dt, float *wf, float *wm, int nx, int ny, int nz,
int nxvh, int nyv, int nzv, int nxhd, int nyhd,
int nzhd) {
/* this subroutine solves 3d maxwell's equation in fourier space for
transverse electric and magnetic fields with periodic boundary
conditions.
input: all, output: wf, wm, exyz, bxyz
approximate flop count is:
680*nxc*nyc*nzc + 149*(nxc*nyc + nxc*nzc + nyc*nzc)
plus nxc*nyc*nzc divides
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
the magnetic field is first updated half a step using the equations:
bx[kz][ky][kx] = bx[kz][ky][kx] - .5*dt*sqrt(-1)*
(ky*ez[kz][ky][kx]-kz*ey[kz][ky][kx])
by[kz][ky][kx] = by[kz][ky][kx] - .5*dt*sqrt(-1)*
(kz*ex[kz][ky][kx]-kx*ez[kz][ky][kx])
bz[kz][ky][kx] = bz[kz][ky][kx] - .5*dt*sqrt(-1)*
(kx*ey[kz][ky][kx]-ky*ex[kz][ky][kx])
the electric field is then updated a whole step using the equations:
ex[kz][ky][kx] = ex[kz][ky][kx] + c2*dt*sqrt(-1)
*(ky*bz[kz][ky][kx]-kz*by[kz][ky][kx])
- affp*dt*cux[kz][ky][kx]*s[kz][ky][kx]
ey[kz][ky][kx] = ey[kz][ky][kx] + c2*dt*sqrt(-1)
*(kz*bx[kz][ky][kx]-kx*bz[kz][ky][kx])
- affp*dt*cuy[kz][ky][kx]*s[kz][ky][kx]
ez[kz][ky][kx] = ez[kz][ky][kx] + c2*dt*sqrt(-1)
*(kx*by[kz][ky][kx]-ky*bx[kz][ky][kx])
- affp*dt*cuz[kz][ky][kx]*s[kz][ky][kx]
the magnetic field is finally updated the remaining half step with
the new electric field and the previous magnetic field equations.
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, c2 = 1./(ci*ci)
and s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2)
j,k,l = fourier mode numbers, except for
ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0,
ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0,
ex(kz=pi) = ey(kz=pi) = ez(kz=pi) = 0,
ex(kx=0,ky=0,kz=0) = ey(kx=0,ky=0,kz=0) = ez(kx=0,ky=0,kz=0) = 0.
and similarly for bx, by, bz.
cu[l][k][j][i] = complex current density
exyz[l][k][j][i] = complex transverse electric field
bxyz[l][k][j][i] = complex magnetic field
for component i, all for fourier mode (j1,k,l)
real(ffc[0][0][0]) = affp = normalization constant = nx*ny*nz/np,
where np=number of particles
aimag(ffc[l][k][j]) = finite-size particle shape factor s,
s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2)
for fourier mode (j,k,l)
ci = reciprocal of velocity of light
dt = time interval between successive calculations
transverse electric field energy is also calculated, using
wf = nx*ny*nz*sum((1/affp)*|exyz[kz][ky][kx]|**2)
magnetic field energy is also calculated, using
wm = nx*ny*nz*sum((c2/affp)*|bxyz[kz][ky][kx]|**2)
nx/ny/nz = system length in x/y/z direction
nxvh = second dimension of field arrays, must be >= nxh
nyv = third dimension of field arrays, must be >= ny
nzv = fourth dimension of field arrays, must be >= nz
nxhd = second dimension of form factor array, must be >= nxh
nyhd = third dimension of form factor array, must be >= nyh
nzhd = fourth dimension of form factor array, must be >= nzh
requires KNC, cu, exyz, bxyz, ffc need to be 64 byte aligned
nxhd needs to be a multiple of 8
nxvh needs to be a multiple of 2
cu, exyz, bxyz needs to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj;
int nxyhd, nxvyh;
float dnx, dny, dnz, dth, c2, cdt, affp, anorm, dkx, dky, dkz;
float adt, afdt;
float at1;
float complex zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9;
double wp, ws, d0;
__m512i v_j, v_it, v_n, v_m;
__m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz;
__m512 v_zero, v_cdt, v_adt, v_afdt, v_dth, v_anorm;
__m512 v_dk1, v_dk2, v_at1, v_at2, v_at3, v_at4;
__m512 v_zt1, v_zt2, v_zt3, v_zt4, v_zt5, v_zt6, v_zt7;
__m512d v_wp, v_ws, v_d;
if (ci <= 0.0)
return;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 2*(nxh/2);
itn = 1 > nxhs ? 1 : nxhs;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
dth = 0.5*dt;
c2 = 1.0/(ci*ci);
cdt = c2*dt;
affp = creal(ffc[0]);
adt = affp*dt;
zero = 0.0 + 0.0*_Complex_I;
anorm = 1.0/affp;
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
v_n = _mm512_set_epi32(15,14,11,10,9,8,13,12,7,6,3,2,1,0,5,4);
v_m = _mm512_set_epi32(15,14,9,8,13,12,11,10,7,6,1,0,5,4,3,2);
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
v_cdt = _mm512_set1_ps(cdt);
v_adt = _mm512_set1_ps(adt);
v_dth = _mm512_set1_ps(dth);
v_anorm = _mm512_set1_ps(anorm);
/* update electromagnetic field and sum field energies */
ws = 0.0;
wp = 0.0;
v_wp = _mm512_set1_pd(0.0);
v_ws = _mm512_set1_pd(0.0);
/* calculate the electromagnetic fields */
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l),
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
/* add kz to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dkz);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkz);
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),
v_dky);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),
v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* afdt = adt*cimagf(ffc[j+kk+ll]); */
v_afdt = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll]);
v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt,
_mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]);
v_afdt = _mm512_permute4f128_ps(v_afdt,0);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(13260),(__m512i)v_afdt,78);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(21845),(__m512i)v_afdt,177);
v_afdt = _mm512_mul_ps(v_adt,v_afdt);
/* update magnetic field half time step, ky > 0, kz > 0 */
/* zt1 = -cimagf(exyz[2+4*(j+kj+lj)]) */
/* + crealf(exyz[2+4*(j+kj+lj)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+kj+lj)]) */
/* + crealf(exyz[1+4*(j+kj+lj)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+kj+lj)]) */
/* + crealf(exyz[4*(j+kj+lj)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt4 = bxyz[4*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2); */
/* zt5 = bxyz[1+4*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1); */
/* zt6 = bxyz[2+4*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj+lj)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),
v_zero,v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2) */
/* - afdt*cu[4*(j+kj+lj)]; */
/* zt8 = exyz[1+4*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1) */
/* - afdt*cu[1+4*(j+kj+lj)]; */
/* zt9 = exyz[2+4*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3) */
/* - afdt*cu[2+4*(j+kj+lj)]; */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+kj+lj)] = zt7; */
/* exyz[1+4*(j+kj+lj)] = zt8; */
/* exyz[2+4*(j+kj+lj)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+kj+lj)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+kj+lj)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */
/* + zt9*conjf(zt9)); */
v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4));
/* zt4 -= dth*(dky*zt1 - dkz*zt2); */
/* zt5 -= dth*(dkz*zt3 - dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+kj+lj)] = zt4; */
/* bxyz[1+4*(j+kj+lj)] = zt5; */
/* bxyz[2+4*(j+kj+lj)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+kj+lj)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+kj+lj)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */
/* + zt6*conjf(zt6)); */
v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5));
/* update magnetic field half time step, ky < 0, kz > 0 */
/* zt1 = -cimagf(exyz[2+4*(j+k1+lj)]) */
/* + crealf(exyz[2+4*(j+k1+lj)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+k1+lj)]) */
/* + crealf(exyz[1+4*(j+k1+lj)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+k1+lj)]) */
/* + crealf(exyz[4*(j+k1+lj)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(12336),
v_zero,v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(771),
v_zero,v_dk2);
/* zt4 = bxyz[4*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2); */
/* zt5 = bxyz[1+4*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1); */
/* zt6 = bxyz[2+4*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1+lj)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),
v_zero,v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2) */
/* - afdt*cu[4*(j+k1+lj)]; */
/* zt8 = exyz[1+4*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1) */
/* - afdt*cu[1+4*(j+k1+lj)]; */
/* zt9 = exyz[2+4*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3) */
/* - afdt*cu[2+4*(j+k1+lj)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+k1+lj)] = zt7; */
/* exyz[1+4*(j+k1+lj)] = zt8; */
/* exyz[2+4*(j+k1+lj)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+k1+lj)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+k1+lj)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */
/* + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 += dth*(dky*zt1 + dkz*zt2); */
/* zt5 -= dth*(dkz*zt3 - dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+k1+lj)] = zt4; */
/* bxyz[1+4*(j+k1+lj)] = zt5; */
/* bxyz[2+4*(j+k1+lj)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+k1+lj)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */
/* + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* update magnetic field half time step, ky > 0, kz < 0 */
/* zt1 = -cimagf(exyz[2+4*(j+kj+l1)]) */
/* + crealf(exyz[2+4*(j+kj+l1)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+kj+l1)]) */
/* + crealf(exyz[1+4*(j+kj+l1)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+kj+l1)]) */
/* + crealf(exyz[4*(j+kj+l1)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(771),
v_zero,v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3084),
v_zero,v_dk2);
/* zt4 = bxyz[4*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2); */
/* zt5 = bxyz[1+4*(j+kj+l1)] + dth*(dkz*zt3 + dkx*zt1); */
/* zt6 = bxyz[2+4*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj+l1)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),
v_zero,v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2) */
/* - afdt*cu[4*(j+kj+l1)]; */
/* zt8 = exyz[1+4*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1) */
/* - afdt*cu[1+4*(j+kj+l1)]; */
/* zt9 = exyz[2+4*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3) */
/* - afdt*cu[2+4*(j+kj+l1)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+kj+l1)] = zt7; */
/* exyz[1+4*(j+kj+l1)] = zt8; */
/* exyz[2+4*(j+kj+l1)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+kj+l1)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+kj+l1)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */
/* + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 -= dth*(dky*zt1 + dkz*zt2); */
/* zt5 += dth*(dkz*zt3 + dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+kj+l1)] = zt4; */
/* bxyz[1+4*(j+kj+l1)] = zt5; */
/* bxyz[2+4*(j+kj+l1)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+kj+l1)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */
/* + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* update magnetic field half time step, ky < 0, kz < 0 */
/* zt1 = -cimagf(exyz[2+4*(j+k1+l1)]) */
/* + crealf(exyz[2+4*(j+k1+l1)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+k1+l1)]) */
/* + crealf(exyz[1+4*(j+k1+l1)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+k1+l1)]) */
/* + crealf(exyz[4*(j+k1+l1)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(13107),
v_zero,v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3855),
v_zero,v_dk2);
/* zt4 = bxyz[4*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2); */
/* zt5 = bxyz[1+4*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1); */
/* zt6 = bxyz[2+4*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1+l1)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),
v_zero,v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2) */
/* - afdt*cu[4*(j+k1+l1)]; */
/* zt8 = exyz[1+4*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1) */
/* - afdt*cu[1+4*(j+k1+l1)]; */
/* zt9 = exyz[2+4*(j+k1+l1)] + cdt*(dkx*zt2 + dky*zt3) */
/* - afdt*cu[2+4*(j+k1+l1)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),
v_zero,v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+k1+l1)] = zt7; */
/* exyz[1+4*(j+k1+l1)] = zt8; */
/* exyz[2+4*(j+k1+l1)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+k1+l1)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */
/* + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 += dth*(dky*zt1 - dkz*zt2); */
/* zt5 += dth*(dkz*zt3 + dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+k1+l1)] = zt4; */
/* bxyz[1+4*(j+k1+l1)] = zt5; */
/* bxyz[2+4*(j+k1+l1)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+k1+l1)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */
/* + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* convert to double precision before accumulating */
v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78));
v_ws = _mm512_add_pd(v_ws,v_d);
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
afdt = adt*cimagf(ffc[j+kk+ll]);
/* update magnetic field half time step, ky > 0, kz > 0 */
zt1 = -cimagf(exyz[2+4*(j+kj+lj)])
+ crealf(exyz[2+4*(j+kj+lj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+kj+lj)])
+ crealf(exyz[1+4*(j+kj+lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+kj+lj)])
+ crealf(exyz[4*(j+kj+lj)])*_Complex_I;
zt4 = bxyz[4*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2);
zt5 = bxyz[1+4*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1);
zt6 = bxyz[2+4*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2)
- afdt*cu[4*(j+kj+lj)];
zt8 = exyz[1+4*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1)
- afdt*cu[1+4*(j+kj+lj)];
zt9 = exyz[2+4*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3)
- afdt*cu[2+4*(j+kj+lj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+kj+lj)] = zt7;
exyz[1+4*(j+kj+lj)] = zt8;
exyz[2+4*(j+kj+lj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)
+ zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1 - dkz*zt2);
zt5 -= dth*(dkz*zt3 - dkx*zt1);
zt6 -= dth*(dkx*zt2 - dky*zt3);
bxyz[4*(j+kj+lj)] = zt4;
bxyz[1+4*(j+kj+lj)] = zt5;
bxyz[2+4*(j+kj+lj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)
+ zt6*conjf(zt6));
wp += (double) at1;
/* update magnetic field half time step, ky < 0, kz > 0 */
zt1 = -cimagf(exyz[2+4*(j+k1+lj)])
+ crealf(exyz[2+4*(j+k1+lj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+k1+lj)])
+ crealf(exyz[1+4*(j+k1+lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+k1+lj)])
+ crealf(exyz[4*(j+k1+lj)])*_Complex_I;
zt4 = bxyz[4*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2);
zt5 = bxyz[1+4*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1);
zt6 = bxyz[2+4*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2)
- afdt*cu[4*(j+k1+lj)];
zt8 = exyz[1+4*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1)
- afdt*cu[1+4*(j+k1+lj)];
zt9 = exyz[2+4*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3)
- afdt*cu[2+4*(j+k1+lj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+k1+lj)] = zt7;
exyz[1+4*(j+k1+lj)] = zt8;
exyz[2+4*(j+k1+lj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)
+ zt9*conjf(zt9));
ws += (double) at1;
zt4 += dth*(dky*zt1 + dkz*zt2);
zt5 -= dth*(dkz*zt3 - dkx*zt1);
zt6 -= dth*(dkx*zt2 + dky*zt3);
bxyz[4*(j+k1+lj)] = zt4;
bxyz[1+4*(j+k1+lj)] = zt5;
bxyz[2+4*(j+k1+lj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)
+ zt6*conjf(zt6));
wp += (double) at1;
/* update magnetic field half time step, ky > 0, kz < 0 */
zt1 = -cimagf(exyz[2+4*(j+kj+l1)])
+ crealf(exyz[2+4*(j+kj+l1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+kj+l1)])
+ crealf(exyz[1+4*(j+kj+l1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+kj+l1)])
+ crealf(exyz[4*(j+kj+l1)])*_Complex_I;
zt4 = bxyz[4*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2);
zt5 = bxyz[1+4*(j+kj+l1)] + dth*(dkz*zt3 + dkx*zt1);
zt6 = bxyz[2+4*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2)
- afdt*cu[4*(j+kj+l1)];
zt8 = exyz[1+4*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1)
- afdt*cu[1+4*(j+kj+l1)];
zt9 = exyz[2+4*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3)
- afdt*cu[2+4*(j+kj+l1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+kj+l1)] = zt7;
exyz[1+4*(j+kj+l1)] = zt8;
exyz[2+4*(j+kj+l1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)
+ zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1 + dkz*zt2);
zt5 += dth*(dkz*zt3 + dkx*zt1);
zt6 -= dth*(dkx*zt2 - dky*zt3);
bxyz[4*(j+kj+l1)] = zt4;
bxyz[1+4*(j+kj+l1)] = zt5;
bxyz[2+4*(j+kj+l1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)
+ zt6*conjf(zt6));
wp += (double) at1;
/* update magnetic field half time step, ky < 0, kz < 0 */
zt1 = -cimagf(exyz[2+4*(j+k1+l1)])
+ crealf(exyz[2+4*(j+k1+l1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+k1+l1)])
+ crealf(exyz[1+4*(j+k1+l1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+k1+l1)])
+ crealf(exyz[4*(j+k1+l1)])*_Complex_I;
zt4 = bxyz[4*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2);
zt5 = bxyz[1+4*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1);
zt6 = bxyz[2+4*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2)
- afdt*cu[4*(j+k1+l1)];
zt8 = exyz[1+4*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1)
- afdt*cu[1+4*(j+k1+l1)];
zt9 = exyz[2+4*(j+k1+l1)] + cdt*(dkx*zt2 + dky*zt3)
- afdt*cu[2+4*(j+k1+l1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+k1+l1)] = zt7;
exyz[1+4*(j+k1+l1)] = zt8;
exyz[2+4*(j+k1+l1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)
+ zt9*conjf(zt9));
ws += (double) at1;
zt4 += dth*(dky*zt1 - dkz*zt2);
zt5 += dth*(dkz*zt3 + dkx*zt1);
zt6 -= dth*(dkx*zt2 + dky*zt3);
bxyz[4*(j+k1+l1)] = zt4;
bxyz[1+4*(j+k1+l1)] = zt5;
bxyz[2+4*(j+k1+l1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)
+ zt6*conjf(zt6));
wp += (double) at1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
afdt = adt*cimagf(ffc[kk+ll]);
/* update magnetic field half time step, kz > 0 */
zt1 = -cimagf(exyz[2+4*(kj+lj)])
+ crealf(exyz[2+4*(kj+lj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(kj+lj)])
+ crealf(exyz[1+4*(kj+lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(kj+lj)])
+ crealf(exyz[4*(kj+lj)])*_Complex_I;
zt4 = bxyz[4*(kj+lj)] - dth*(dky*zt1 - dkz*zt2);
zt5 = bxyz[1+4*(kj+lj)] - dth*(dkz*zt3);
zt6 = bxyz[2+4*(kj+lj)] + dth*(dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(kj+lj)] + cdt*(dky*zt1 - dkz*zt2)
- afdt*cu[4*(kj+lj)];
zt8 = exyz[1+4*(kj+lj)] + cdt*(dkz*zt3) - afdt*cu[1+4*(kj+lj)];
zt9 = exyz[2+4*(kj+lj)] - cdt*(dky*zt3) - afdt*cu[2+4*(kj+lj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(kj+lj)] = zt7;
exyz[1+4*(kj+lj)] = zt8;
exyz[2+4*(kj+lj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1 - dkz*zt2);
zt5 -= dth*(dkz*zt3);
zt6 += dth*(dky*zt3);
bxyz[4*(kj+lj)] = zt4;
bxyz[1+4*(kj+lj)] = zt5;
bxyz[2+4*(kj+lj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(k1+lj)] = zero;
bxyz[1+4*(k1+lj)] = zero;
bxyz[2+4*(k1+lj)] = zero;
exyz[4*(k1+lj)] = zero;
exyz[1+4*(k1+lj)] = zero;
exyz[2+4*(k1+lj)] = zero;
/* update magnetic field half time step, kz < 0 */
zt1 = -cimagf(exyz[2+4*(kj+l1)])
+ crealf(exyz[2+4*(kj+l1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(kj+l1)])
+ crealf(exyz[1+4*(kj+l1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(kj+l1)])
+ crealf(exyz[4*(kj+l1)])*_Complex_I;
zt4 = bxyz[4*(kj+l1)] - dth*(dky*zt1 + dkz*zt2);
zt5 = bxyz[1+4*(kj+l1)] + dth*(dkz*zt3);
zt6 = bxyz[2+4*(kj+l1)] + dth*(dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(kj+l1)] + cdt*(dky*zt1 + dkz*zt2)
- afdt*cu[4*(kj+l1)];
zt8 = exyz[1+4*(kj+l1)] - cdt*(dkz*zt3) - afdt*cu[1+4*(kj+l1)];
zt9 = exyz[2+4*(kj+l1)] - cdt*(dky*zt3) - afdt*cu[2+4*(kj+l1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(kj+l1)] = zt7;
exyz[1+4*(kj+l1)] = zt8;
exyz[2+4*(kj+l1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1 + dkz*zt2);
zt5 += dth*(dkz*zt3);
zt6 += dth*(dky*zt3);
bxyz[4*(kj+l1)] = zt4;
bxyz[1+4*(kj+l1)] = zt5;
bxyz[2+4*(kj+l1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
exyz[4*(k1+l1)] = zero;
exyz[1+4*(k1+l1)] = zero;
exyz[2+4*(k1+l1)] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),v_zero);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),v_zero);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* afdt = adt*cimagf(ffc[j+ll]); */
v_afdt = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+ll]);
v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt,
_mm512_int2mask(15),(float *)&ffc[j+ll+8]);
v_afdt = _mm512_permute4f128_ps(v_afdt,0);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(13260),(__m512i)v_afdt,78);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(21845),(__m512i)v_afdt,177);
v_afdt = _mm512_mul_ps(v_adt,v_afdt);
/* update magnetic field half time step, kz > 0 */
/* zt1 = -cimagf(exyz[2+4*(j+lj)]) */
/* + crealf(exyz[2+4*(j+lj)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+lj)]) */
/* + crealf(exyz[1+4*(j+lj)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+lj)]) */
/* + crealf(exyz[4*(j+lj)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+lj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt4 = bxyz[4*(j+lj)] + dth*(dkz*zt2); */
/* zt5 = bxyz[1+4*(j+lj)] - dth*(dkz*zt3 - dkx*zt1); */
/* zt6 = bxyz[2+4*(j+lj)] - dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+lj)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+lj)] - cdt*(dkz*zt2) - afdt*cu[4*(j+lj)]; */
/* zt8 = exyz[1+4*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1) */
/* - afdt*cu[1+4*(j+lj)]; */
/* zt9 = exyz[2+4*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+lj)]; */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+lj)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+lj)] = zt7; */
/* exyz[1+4*(j+lj)] = zt8; */
/* exyz[2+4*(j+lj)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+lj)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+lj)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4));
/* zt4 += dth*(dkz*zt2); */
/* zt5 -= dth*(dkz*zt3 - dkx*zt1); */
/* zt6 -= dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+lj)] = zt4; */
/* bxyz[1+4*(j+lj)] = zt5; */
/* bxyz[2+4*(j+lj)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+lj)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+lj)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5));
/* bxyz[4*(j+k1+lj)] = zero; */
/* bxyz[1+4*(j+k1+lj)] = zero; */
/* bxyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zero);
/* exyz[4*(j+k1+lj)] = zero; */
/* exyz[1+4*(j+k1+lj)] = zero; */
/* exyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1+lj)],v_zero);
/* update magnetic field half time step, kz < 0 */
/* zt1 = -cimagf(exyz[2+4*(j+l1)]) */
/* + crealf(exyz[2+4*(j+l1)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+l1)]) */
/* + crealf(exyz[1+4*(j+l1)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+l1)]) */
/* + crealf(exyz[4*(j+l1)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(771),v_zero,
v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3084),v_zero,
v_dk2);
/* zt4 = bxyz[4*(j+l1)] - dth*(dkz*zt2); */
/* zt5 = bxyz[1+4*(j+l1)] + dth*(dkz*zt3 + dkx*zt1); */
/* zt6 = bxyz[2+4*(j+l1)] - dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+l1)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[4*(j+l1)]; */
/* zt8 = exyz[1+4*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1) */
/* - afdt*cu[1+4*(j+l1)]; */
/* zt9 = exyz[2+4*(j+l1)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+l1)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+l1)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+l1)] = zt7; */
/* exyz[1+4*(j+l1)] = zt8; */
/* exyz[2+4*(j+l1)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+l1)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+l1)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 -= dth*(dkz*zt2); */
/* zt5 += dth*(dkz*zt3 + dkx*zt1); */
/* zt6 -= dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+l1)] = zt4; */
/* bxyz[1+4*(j+l1)] = zt5; */
/* bxyz[2+4*(j+l1)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+l1)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* convert to double precision before accumulating */
v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78));
v_ws = _mm512_add_pd(v_ws,v_d);
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
/* exyz[4*(j+k1+l1)] = zero; */
/* exyz[1+4*(j+k1+l1)] = zero; */
/* exyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
afdt = adt*cimagf(ffc[j+ll]);
/* update magnetic field half time step, kz > 0 */
zt1 = -cimagf(exyz[2+4*(j+lj)])
+ crealf(exyz[2+4*(j+lj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+lj)])
+ crealf(exyz[1+4*(j+lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+lj)])
+ crealf(exyz[4*(j+lj)])*_Complex_I;
zt4 = bxyz[4*(j+lj)] + dth*(dkz*zt2);
zt5 = bxyz[1+4*(j+lj)] - dth*(dkz*zt3 - dkx*zt1);
zt6 = bxyz[2+4*(j+lj)] - dth*(dkx*zt2);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+lj)] - cdt*(dkz*zt2) - afdt*cu[4*(j+lj)];
zt8 = exyz[1+4*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1)
- afdt*cu[1+4*(j+lj)];
zt9 = exyz[2+4*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+lj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+lj)] = zt7;
exyz[1+4*(j+lj)] = zt8;
exyz[2+4*(j+lj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 += dth*(dkz*zt2);
zt5 -= dth*(dkz*zt3 - dkx*zt1);
zt6 -= dth*(dkx*zt2);
bxyz[4*(j+lj)] = zt4;
bxyz[1+4*(j+lj)] = zt5;
bxyz[2+4*(j+lj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(j+k1+lj)] = zero;
bxyz[1+4*(j+k1+lj)] = zero;
bxyz[2+4*(j+k1+lj)] = zero;
exyz[4*(j+k1+lj)] = zero;
exyz[1+4*(j+k1+lj)] = zero;
exyz[2+4*(j+k1+lj)] = zero;
/* update magnetic field half time step, kz < 0 */
zt1 = -cimagf(exyz[2+4*(j+l1)])
+ crealf(exyz[2+4*(j+l1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+l1)])
+ crealf(exyz[1+4*(j+l1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+l1)])
+ crealf(exyz[4*(j+l1)])*_Complex_I;
zt4 = bxyz[4*(j+l1)] - dth*(dkz*zt2);
zt5 = bxyz[1+4*(j+l1)] + dth*(dkz*zt3 + dkx*zt1);
zt6 = bxyz[2+4*(j+l1)] - dth*(dkx*zt2);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[4*(j+l1)];
zt8 = exyz[1+4*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1)
- afdt*cu[1+4*(j+l1)];
zt9 = exyz[2+4*(j+l1)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+l1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+l1)] = zt7;
exyz[1+4*(j+l1)] = zt8;
exyz[2+4*(j+l1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dkz*zt2);
zt5 += dth*(dkz*zt3 + dkx*zt1);
zt6 -= dth*(dkx*zt2);
bxyz[4*(j+l1)] = zt4;
bxyz[1+4*(j+l1)] = zt5;
bxyz[2+4*(j+l1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
exyz[4*(j+k1+l1)] = zero;
exyz[1+4*(j+k1+l1)] = zero;
exyz[2+4*(j+k1+l1)] = zero;
}
/* mode numbers kx = 0, nx/2 */
afdt = adt*cimagf(ffc[ll]);
/* update magnetic field half time step */
zt2 = -cimagf(exyz[1+4*(lj)]) + crealf(exyz[1+4*(lj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(lj)]) + crealf(exyz[4*(lj)])*_Complex_I;
zt4 = bxyz[4*lj] + dth*(dkz*zt2);
zt5 = bxyz[1+4*lj] - dth*(dkz*zt3);
/* update electric field whole time step */
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*lj] - cdt*(dkz*zt2) - afdt*cu[4*lj];
zt8 = exyz[1+4*lj] + cdt*(dkz*zt3) - afdt*cu[1+4*lj];
/* update magnetic field half time step and store electric field */
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*lj] = zt7;
exyz[1+4*lj] = zt8;
exyz[2+4*lj] = zero;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8));
ws += (double) at1;
zt4 += dth*(dkz*zt2);
zt5 -= dth*(dkz*zt3);
bxyz[4*lj] = zt4;
bxyz[1+4*lj] = zt5;
bxyz[2+4*lj] = zero;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5));
wp += (double) at1;
bxyz[4*(k1+lj)] = zero;
bxyz[1+4*(k1+lj)] = zero;
bxyz[2+4*(k1+lj)] = zero;
exyz[4*(k1+lj)] = zero;
exyz[1+4*(k1+lj)] = zero;
exyz[2+4*(k1+lj)] = zero;
bxyz[4*l1] = zero;
bxyz[1+4*l1] = zero;
bxyz[2+4*l1] = zero;
exyz[4*l1] = zero;
exyz[1+4*l1] = zero;
exyz[2+4*l1] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
exyz[4*(k1+l1)] = zero;
exyz[1+4*(k1+l1)] = zero;
exyz[2+4*(k1+l1)]= zero;
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
for (k = 1; k < nyh; k++) {
/* dky = dny*(float) k; */
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* add ky to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dky);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dky);
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084),
v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336),
v_dkx);
/* afdt = adt*cimagf(ffc[j+kk]); */
v_afdt = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j+kk]);
v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt,
_mm512_int2mask(15),(float *)&ffc[j+kk+8]);
v_afdt = _mm512_permute4f128_ps(v_afdt,0);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(13260),(__m512i)v_afdt,78);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(21845),(__m512i)v_afdt,177);
v_afdt = _mm512_mul_ps(v_adt,v_afdt);
/* update magnetic field half time step, ky > 0 */
/* zt1 = -cimagf(exyz[2+4*(j+kj)]) */
/* + crealf(exyz[2+4*(j+kj)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+kj)]) */
/* + crealf(exyz[1+4*(j+kj)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+kj)]) */
/* + crealf(exyz[4*(j+kj)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt4 = bxyz[4*(j+kj)] - dth*(dky*zt1); */
/* zt5 = bxyz[1+4*(j+kj)] + dth*(dkx*zt1); */
/* zt6 = bxyz[2+4*(j+kj)] - dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+kj)] + cdt*(dky*zt1) - afdt*cu[4*(j+kj)]; */
/* zt8 = exyz[1+4*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+kj)]; */
/* zt9 = exyz[2+4*(j+kj)] + cdt*(dkx*zt2 - dky*zt3) */
/* - afdt*cu[2+4*(j+kj)]; */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),
v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+kj)] = zt7; */
/* exyz[1+4*(j+kj)] = zt8; */
/* exyz[2+4*(j+kj)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+kj)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+kj)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4));
/* zt4 -= dth*(dky*zt1); */
/* zt5 += dth*(dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 - dky*zt3); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+kj)] = zt4; */
/* bxyz[1+4*(j+kj)] = zt5; */
/* bxyz[2+4*(j+kj)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+kj)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+kj)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5));
/* update magnetic field half time step, ky < 0 */
/* zt1 = -cimagf(exyz[2+4*(j+k1)]) */
/* + crealf(exyz[2+4*(j+k1)])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*(j+k1)]) */
/* + crealf(exyz[1+4*(j+k1)])*_Complex_I; */
/* zt3 = -cimagf(exyz[4*(j+k1)]) */
/* + crealf(exyz[4*(j+k1)])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(12336),v_zero,
v_dk1);
v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(771),v_zero,
v_dk2);
/* zt4 = bxyz[4*(j+k1)] + dth*(dky*zt1); */
/* zt5 = bxyz[1+4*(j+k1)] + dth*(dkx*zt1); */
/* zt6 = bxyz[2+4*(j+k1)] - dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1)]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
/* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt7 = exyz[4*(j+k1)] - cdt*(dky*zt1) - afdt*cu[4*(j+k1)]; */
/* zt8 = exyz[1+4*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+k1)]; */
/* zt9 = exyz[2+4*(j+k1)] + cdt*(dkx*zt2 + dky*zt3) */
/* - afdt*cu[2+4*(j+k1)]; */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1)]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),
v_afdt,v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
/* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*(j+k1)] = zt7; */
/* exyz[1+4*(j+k1)] = zt8; */
/* exyz[2+4*(j+k1)] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&exyz[4*(j+k1)],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*(j+k1)],v_zt4);
}
/* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4),
v_zt6);
/* zt4 += dth*(dky*zt1); */
/* zt5 += dth*(dkx*zt1); */
/* zt6 -= dth*(dkx*zt2 + dky*zt3); */
v_zt1 = _mm512_mul_ps(v_at2,v_zt3);
v_zt2 = _mm512_mul_ps(v_at3,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*(j+k1)] = zt4; */
/* bxyz[1+4*(j+k1)] = zt5; */
/* bxyz[2+4*(j+k1)] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),
v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*(j+k1)],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zt5);
}
/* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5),
v_zt7);
/* convert to double precision before accumulating */
v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78));
v_ws = _mm512_add_pd(v_ws,v_d);
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+kj+l1)] = zero; */
/* bxyz[1+4*(j+kj+l1)] = zero; */
/* bxyz[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zero);
/* exyz[4*(j+kj+l1)] = zero; */
/* exyz[1+4*(j+kj+l1)] = zero; */
/* exyz[2+4*(j+kj+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+kj+l1)],v_zero);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
/* exyz[4*(j+k1+l1)] = zero; */
/* exyz[1+4*(j+k1+l1)] = zero; */
/* exyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
afdt = adt*cimagf(ffc[j+kk]);
/* update magnetic field half time step, ky > 0 */
zt1 = -cimagf(exyz[2+4*(j+kj)])
+ crealf(exyz[2+4*(j+kj)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+kj)])
+ crealf(exyz[1+4*(j+kj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+kj)])
+ crealf(exyz[4*(j+kj)])*_Complex_I;
zt4 = bxyz[4*(j+kj)] - dth*(dky*zt1);
zt5 = bxyz[1+4*(j+kj)] + dth*(dkx*zt1);
zt6 = bxyz[2+4*(j+kj)] - dth*(dkx*zt2 - dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+kj)] + cdt*(dky*zt1) - afdt*cu[4*(j+kj)];
zt8 = exyz[1+4*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+kj)];
zt9 = exyz[2+4*(j+kj)] + cdt*(dkx*zt2 - dky*zt3)
- afdt*cu[2+4*(j+kj)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+kj)] = zt7;
exyz[1+4*(j+kj)] = zt8;
exyz[2+4*(j+kj)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1);
zt5 += dth*(dkx*zt1);
zt6 -= dth*(dkx*zt2 - dky*zt3);
bxyz[4*(j+kj)] = zt4;
bxyz[1+4*(j+kj)] = zt5;
bxyz[2+4*(j+kj)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
/* update magnetic field half time step, ky < 0 */
zt1 = -cimagf(exyz[2+4*(j+k1)])
+ crealf(exyz[2+4*(j+k1)])*_Complex_I;
zt2 = -cimagf(exyz[1+4*(j+k1)])
+ crealf(exyz[1+4*(j+k1)])*_Complex_I;
zt3 = -cimagf(exyz[4*(j+k1)])
+ crealf(exyz[4*(j+k1)])*_Complex_I;
zt4 = bxyz[4*(j+k1)] + dth*(dky*zt1);
zt5 = bxyz[1+4*(j+k1)] + dth*(dkx*zt1);
zt6 = bxyz[2+4*(j+k1)] - dth*(dkx*zt2 + dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*(j+k1)] - cdt*(dky*zt1) - afdt*cu[4*(j+k1)];
zt8 = exyz[1+4*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+k1)];
zt9 = exyz[2+4*(j+k1)] + cdt*(dkx*zt2 + dky*zt3)
- afdt*cu[2+4*(j+k1)];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*(j+k1)] = zt7;
exyz[1+4*(j+k1)] = zt8;
exyz[2+4*(j+k1)] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt4 += dth*(dky*zt1);
zt5 += dth*(dkx*zt1);
zt6 -= dth*(dkx*zt2 + dky*zt3);
bxyz[4*(j+k1)] = zt4;
bxyz[1+4*(j+k1)] = zt5;
bxyz[2+4*(j+k1)] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(j+kj+l1)] = zero;
bxyz[1+4*(j+kj+l1)] = zero;
bxyz[2+4*(j+kj+l1)] = zero;
exyz[4*(j+kj+l1)] = zero;
exyz[1+4*(j+kj+l1)] = zero;
exyz[2+4*(j+kj+l1)] = zero;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
exyz[4*(j+k1+l1)] = zero;
exyz[1+4*(j+k1+l1)] = zero;
exyz[2+4*(j+k1+l1)] = zero;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
afdt = adt*cimagf(ffc[kk]);
/* update magnetic field half time step */
zt1 = -cimagf(exyz[2+4*(kj)]) + crealf(exyz[2+4*(kj)])*_Complex_I;
zt3 = -cimagf(exyz[4*(kj)]) + crealf(exyz[4*(kj)])*_Complex_I;
zt4 = bxyz[4*kj] - dth*(dky*zt1);
zt6 = bxyz[2+4*kj] + dth*(dky*zt3);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
zt7 = exyz[4*kj] + cdt*(dky*zt1) - afdt*cu[4*kj];
zt9 = exyz[2+4*kj] - cdt*(dky*zt3) - afdt*cu[2+4*kj];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
exyz[4*kj] = zt7;
exyz[1+4*kj] = zero;
exyz[2+4*kj] = zt9;
at1 = anorm*(zt7*conjf(zt7) + zt9*conjf(zt9));
ws += (double) at1;
zt4 -= dth*(dky*zt1);
zt6 += dth*(dky*zt3);
bxyz[4*kj] = zt4;
bxyz[1+4*kj] = zero;
bxyz[2+4*kj] = zt6;
at1 = anorm*(zt4*conjf(zt4) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*k1] = zero;
bxyz[1+4*k1] = zero;
bxyz[2+4*k1] = zero;
exyz[4*k1] = zero;
exyz[1+4*k1] = zero;
exyz[2+4*k1] = zero;
bxyz[4*(kj+l1)] = zero;
bxyz[1+4*(kj+l1)] = zero;
bxyz[2+4*(kj+l1)]= zero;
exyz[4*(kj+l1)] = zero;
exyz[1+4*(kj+l1)] = zero;
exyz[2+4*(kj+l1)] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
exyz[4*(k1+l1)] = zero;
exyz[1+4*(k1+l1)] = zero;
exyz[2+4*(k1+l1)] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* add kx to curl operators */
v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkx);
v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkx);
/* afdt = adt*cimagf(ffc[j]); */
v_afdt = _mm512_mask_loadunpacklo_ps(v_zero,
_mm512_int2mask(15),(float *)&ffc[j]);
v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt,
_mm512_int2mask(15),(float *)&ffc[j+8]);
v_afdt = _mm512_permute4f128_ps(v_afdt,0);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(13260),(__m512i)v_afdt,78);
v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt,
_mm512_int2mask(21845),(__m512i)v_afdt,177);
v_afdt = _mm512_mul_ps(v_adt,v_afdt);
/* update magnetic field half time step */
/* zt1 = -cimagf(exyz[2+4*j]) + crealf(exyz[2+4*j])*_Complex_I; */
/* zt2 = -cimagf(exyz[1+4*j]) + crealf(exyz[1+4*j])*_Complex_I; */
v_zt4 = _mm512_load_ps((float *)&exyz[4*j]);
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt5 = bxyz[1+4*j] + dth*(dkx*zt1); */
/* zt6 = bxyz[2+4*j] - dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt2 = _mm512_load_ps((float *)&bxyz[4*j]);
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1);
/* update electric field whole time step */
/* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */
/* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero,
v_zt5);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* zt8 = exyz[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j]; */
/* zt9 = exyz[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j]; */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),v_zt4);
v_zt2 = _mm512_load_ps((float *)&cu[4*j]);
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),v_afdt,
v_zt2);
v_zt4 = _mm512_sub_ps(v_zt1,v_zt2);
/* update magnetic field half time step and store electric field */
/* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */
/* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */
v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero,
v_zt4);
v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177);
/* exyz[4*j] = zero; */
/* exyz[1+4*j] = zt8; */
/* exyz[2+4*j] = zt9; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),v_zero);
_mm512_mask_store_ps((float *)&exyz[4*j],
_mm512_int2mask(65280),v_zt4);
}
else {
_mm512_store_ps((float *)&exyz[4*j],v_zt4);
}
/* ws += anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); */
v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4));
/* zt5 += dth*(dkx*zt1); */
/* zt6 -= dth*(dkx*zt2); */
v_zt1 = _mm512_mul_ps(v_dk1,v_zt3);
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3);
v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1);
v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2);
v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2));
v_zt5 = _mm512_sub_ps(v_zt5,v_zt1);
/* bxyz[4*j] = zero; */
/* bxyz[1+4*j] = zt5; */
/* bxyz[2+4*j] = zt6; */
/* zero out kx = 0 mode */
if (j==0) {
v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),v_zero);
_mm512_mask_store_ps((float *)&bxyz[4*j],
_mm512_int2mask(65280),v_zt5);
}
else {
_mm512_store_ps((float *)&bxyz[4*j],v_zt5);
}
/* wp += anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); */
v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5));
/* convert to double precision before accumulating */
v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78));
v_ws = _mm512_add_pd(v_ws,v_d);
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78));
v_wp = _mm512_add_pd(v_wp,v_d);
/* bxyz[4*(j+k1)] = zero; */
/* bxyz[1+4*(j+k1)] = zero; */
/* bxyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zero);
/* exyz[4*(j+k1)] = zero; */
/* exyz[1+4*(j+k1)] = zero; */
/* exyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1)],v_zero);
/* bxyz[4*(j+l1)] = zero; */
/* bxyz[1+4*(j+l1)] = zero; */
/* bxyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zero);
/* exyz[4*(j+l1)] = zero; */
/* exyz[1+4*(j+l1)] = zero; */
/* exyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+l1)],v_zero);
/* bxyz[4*(j+k1+l1)] = zero; */
/* bxyz[1+4*(j+k1+l1)] = zero; */
/* bxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero);
/* exyz[4*(j+k1+l1)] = zero; */
/* exyz[1+4*(j+k1+l1)] = zero; */
/* exyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
dkx = dnx*(float) j;
afdt = adt*cimagf(ffc[j]);
/* update magnetic field half time step */
zt1 = -cimagf(exyz[2+4*j]) + crealf(exyz[2+4*j])*_Complex_I;
zt2 = -cimagf(exyz[1+4*j]) + crealf(exyz[1+4*j])*_Complex_I;
zt5 = bxyz[1+4*j] + dth*(dkx*zt1);
zt6 = bxyz[2+4*j] - dth*(dkx*zt2);
/* update electric field whole time step */
zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
zt8 = exyz[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j];
zt9 = exyz[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j];
/* update magnetic field half time step and store electric field */
zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
exyz[4*j] = zero;
exyz[1+4*j] = zt8;
exyz[2+4*j] = zt9;
at1 = anorm*(zt8*conjf(zt8) + zt9*conjf(zt9));
ws += (double) at1;
zt5 += dth*(dkx*zt1);
zt6 -= dth*(dkx*zt2);
bxyz[4*j] = zero;
bxyz[1+4*j] = zt5;
bxyz[2+4*j] = zt6;
at1 = anorm*(zt5*conjf(zt5) + zt6*conjf(zt6));
wp += (double) at1;
bxyz[4*(j+k1)] = zero;
bxyz[1+4*(j+k1)] = zero;
bxyz[2+4*(j+k1)] = zero;
exyz[4*(j+k1)] = zero;
exyz[1+4*(j+k1)] = zero;
exyz[2+4*(j+k1)] = zero;
bxyz[4*(j+l1)] = zero;
bxyz[1+4*(j+l1)] = zero;
bxyz[2+4*(j+l1)] = zero;
exyz[4*(j+l1)] = zero;
exyz[1+4*(j+l1)] = zero;
exyz[2+4*(j+l1)] = zero;
bxyz[4*(j+k1+l1)] = zero;
bxyz[1+4*(j+k1+l1)] = zero;
bxyz[2+4*(j+k1+l1)] = zero;
exyz[4*(j+k1+l1)] = zero;
exyz[1+4*(j+k1+l1)] = zero;
exyz[2+4*(j+k1+l1)] = zero;
}
bxyz[0] = zero;
bxyz[1] = zero;
bxyz[2] = zero;
exyz[0] = zero;
exyz[1] = zero;
exyz[2]= zero;
bxyz[4*k1] = zero;
bxyz[1+4*k1] = zero;
bxyz[2+4*k1] = zero;
exyz[4*k1] = zero;
exyz[1+4*k1] = zero;
exyz[2+4*k1] = zero;
bxyz[4*l1] = zero;
bxyz[1+4*l1] = zero;
bxyz[2+4*l1] = zero;
exyz[4*l1] = zero;
exyz[1+4*l1] = zero;
exyz[2+4*l1] = zero;
bxyz[4*(k1+l1)] = zero;
bxyz[1+4*(k1+l1)] = zero;
bxyz[2+4*(k1+l1)] = zero;
exyz[4*(k1+l1)] = zero;
exyz[1+4*(k1+l1)] = zero;
exyz[2+4*(k1+l1)] = zero;
d0 = _mm512_reduce_add_pd(v_ws);
*wf = (ws + d0)*((float) nx)*((float) ny)*((float) nz);
d0 = _mm512_reduce_add_pd(v_wp);
*wm = c2*(wp + d0)*((float) nx)*((float) ny)*((float) nz);
return;
}
/*--------------------------------------------------------------------*/
void ckncemfield3(float complex fxyz[], float complex exyz[],
                  float complex ffc[], int isign, int nx, int ny,
                  int nz, int nxvh, int nyv, int nzv, int nxhd,
                  int nyhd, int nzhd) {
/* this subroutine either adds complex vector fields if isign > 0
   or copies complex vector fields if isign < 0
   includes additional smoothing: each mode of exyz is scaled by the
   smoothing factor cimagf(ffc[...]) before being added to/stored in
   fxyz
   requires KNC, fxyz, exyz, ffc need to be 64 byte aligned
   nxhd needs to be a multiple of 8
   nxvh needs to be a multiple of 2
   fxyz, exyz needs to have 4 components
   fixed: the kx=0/nx2 boundary gathers previously read the high half
   of ffc from a stale (or, when nyh==1, uninitialized) kk offset;
   both halves now read from the ll run, matching the scalar loops
local data                                                 */
   int j, k, l, nxh, nyh, nzh, nxhs, itn, k1, l1, kk, kj, ll, lj;
   int nxyhd, nxvyh;
   float at1;
   __m512 v_at1, v_zero, v_zt1, v_zt2;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   nzh = 1 > nz/2 ? 1 : nz/2;
   nxhs = 2*(nxh/2);
   itn = 1 > nxhs ? 1 : nxhs;
   nxyhd = nxhd*nyhd;
   nxvyh = nxvh*nyv;
   v_zero = _mm512_setzero_ps();
/* add the fields */
   if (isign > 0) {
      for (l = 1; l < nzh; l++) {
         ll = nxyhd*l;
         lj = nxvyh*l;
         l1 = nxvyh*nz - lj;
         for (k = 1; k < nyh; k++) {
            kk = nxhd*k;
            kj = nxvh*k;
            k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 2 */
            for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+kk+ll]); */
/* unaligned load of 2 complex form factors, then broadcast the      */
/* imaginary parts so each 4-float field element sees its own at1    */
               v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
                       _mm512_int2mask(15),(float *)&ffc[j+kk+ll]);
               v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                       _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]);
               v_at1 = _mm512_permute4f128_ps(v_at1,0);
               v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                       _mm512_int2mask(13260),(__m512i)v_at1,78);
               v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                       _mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+kj+lj)] += exyz[4*(j+kj+lj)]*at1;     */
/* fxyz[1+4*(j+kj+lj)] += exyz[1+4*(j+kj+lj)]*at1; */
/* fxyz[2+4*(j+kj+lj)] += exyz[2+4*(j+kj+lj)]*at1; */
               v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]);
               v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+lj)]);
               v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
               _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],v_zt2);
/* fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1;     */
/* fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; */
/* fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; */
               v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
               v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+lj)]);
               v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
               _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2);
/* fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1;     */
/* fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; */
/* fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; */
               v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
               v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+l1)]);
               v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
               _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;     */
/* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */
               v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
               v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]);
               v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
               _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
            }
/* loop over remaining elements */
            for (j = itn; j < nxh; j++) {
               at1 = cimagf(ffc[j+kk+ll]);
               fxyz[4*(j+kj+lj)] += exyz[4*(j+kj+lj)]*at1;
               fxyz[1+4*(j+kj+lj)] += exyz[1+4*(j+kj+lj)]*at1;
               fxyz[2+4*(j+kj+lj)] += exyz[2+4*(j+kj+lj)]*at1;
               fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1;
               fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1;
               fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1;
               fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1;
               fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1;
               fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1;
               fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;
               fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1;
               fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1;
            }
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
         for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+ll]); */
            v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
                    _mm512_int2mask(15),(float *)&ffc[j+ll]);
/* bug fix: high half must continue the ffc[j+ll] run; the original   */
/* read ffc[j+kk+8] with kk left over from the k loop above           */
            v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                    _mm512_int2mask(15),(float *)&ffc[j+ll+8]);
            v_at1 = _mm512_permute4f128_ps(v_at1,0);
            v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                    _mm512_int2mask(13260),(__m512i)v_at1,78);
            v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                    _mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+lj)] += exyz[4*(j+lj)]*at1;     */
/* fxyz[1+4*(j+lj)] += exyz[1+4*(j+lj)]*at1; */
/* fxyz[2+4*(j+lj)] += exyz[2+4*(j+lj)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+lj)]);
            v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+lj)]);
            v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
            _mm512_store_ps((float *)&fxyz[4*(j+lj)],v_zt2);
/* fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1;     */
/* fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; */
/* fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
            v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+lj)]);
            v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
            _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2);
/* fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1;     */
/* fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; */
/* fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
            v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+l1)]);
            v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
            _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;     */
/* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
            v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]);
            v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
            _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
         }
/* loop over remaining elements */
         for (j = itn; j < nxh; j++) {
            at1 = cimagf(ffc[j+ll]);
            fxyz[4*(j+lj)] += exyz[4*(j+lj)]*at1;
            fxyz[1+4*(j+lj)] += exyz[1+4*(j+lj)]*at1;
            fxyz[2+4*(j+lj)] += exyz[2+4*(j+lj)]*at1;
            fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1;
            fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1;
            fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1;
            fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1;
            fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1;
            fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1;
            fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;
            fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1;
            fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1;
         }
      }
/* mode numbers kz = 0, nz/2 */
      l1 = nxvyh*nzh;
      for (k = 1; k < nyh; k++) {
         kk = nxhd*k;
         kj = nxvh*k;
         k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 2 */
         for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+kk]); */
            v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
                    _mm512_int2mask(15),(float *)&ffc[j+kk]);
            v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                    _mm512_int2mask(15),(float *)&ffc[j+kk+8]);
            v_at1 = _mm512_permute4f128_ps(v_at1,0);
            v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                    _mm512_int2mask(13260),(__m512i)v_at1,78);
            v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                    _mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+kj)] += exyz[4*(j+kj)]*at1;     */
/* fxyz[1+4*(j+kj)] += exyz[1+4*(j+kj)]*at1; */
/* fxyz[2+4*(j+kj)] += exyz[2+4*(j+kj)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj)]);
            v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj)]);
            v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
            _mm512_store_ps((float *)&fxyz[4*(j+kj)],v_zt2);
/* fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1;     */
/* fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; */
/* fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
            v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1)]);
            v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
            _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2);
/* fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1;     */
/* fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; */
/* fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
            v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+l1)]);
            v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
            _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;     */
/* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
            v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]);
            v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
            _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
         }
/* loop over remaining elements */
         for (j = itn; j < nxh; j++) {
            at1 = cimagf(ffc[j+kk]);
            fxyz[4*(j+kj)] += exyz[4*(j+kj)]*at1;
            fxyz[1+4*(j+kj)] += exyz[1+4*(j+kj)]*at1;
            fxyz[2+4*(j+kj)] += exyz[2+4*(j+kj)]*at1;
            fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1;
            fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1;
            fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1;
            fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1;
            fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1;
            fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1;
            fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;
            fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1;
            fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1;
         }
      }
/* mode numbers ky = 0, ny/2 and kz = 0, nz/2 */
      k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
      for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j]); */
         v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
                 (float *)&ffc[j]);
         v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                 _mm512_int2mask(15),(float *)&ffc[j+8]);
         v_at1 = _mm512_permute4f128_ps(v_at1,0);
         v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                 _mm512_int2mask(13260),(__m512i)v_at1,78);
         v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                 _mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*j] += exyz[4*j]*at1;     */
/* fxyz[1+4*j] += exyz[1+4*j]*at1; */
/* fxyz[2+4*j] += exyz[2+4*j]*at1; */
         v_zt1 = _mm512_load_ps((float *)&exyz[4*j]);
         v_zt2 = _mm512_load_ps((float *)&fxyz[4*j]);
         v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
         _mm512_store_ps((float *)&fxyz[4*j],v_zt2);
/* fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1;     */
/* fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; */
/* fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; */
         v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
         v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1)]);
         v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
         _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2);
/* fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1;     */
/* fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; */
/* fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; */
         v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
         v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+l1)]);
         v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
         _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;     */
/* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */
         v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
         v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]);
         v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2);
         _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
      }
/* loop over remaining elements */
      for (j = itn; j < nxh; j++) {
         at1 = cimagf(ffc[j]);
         fxyz[4*j] += exyz[4*j]*at1;
         fxyz[1+4*j] += exyz[1+4*j]*at1;
         fxyz[2+4*j] += exyz[2+4*j]*at1;
         fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1;
         fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1;
         fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1;
         fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1;
         fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1;
         fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1;
         fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1;
         fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1;
         fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1;
      }
   }
/* copy the fields */
   else if (isign < 0) {
      for (l = 1; l < nzh; l++) {
         ll = nxyhd*l;
         lj = nxvyh*l;
         l1 = nxvyh*nz - lj;
         for (k = 1; k < nyh; k++) {
            kk = nxhd*k;
            kj = nxvh*k;
            k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 2 */
            for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+kk+ll]); */
               v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
                       _mm512_int2mask(15),(float *)&ffc[j+kk+ll]);
               v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                       _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]);
               v_at1 = _mm512_permute4f128_ps(v_at1,0);
               v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                       _mm512_int2mask(13260),(__m512i)v_at1,78);
               v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                       _mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+kj+lj)] = exyz[4*(j+kj+lj)]*at1;     */
/* fxyz[1+4*(j+kj+lj)] = exyz[1+4*(j+kj+lj)]*at1; */
/* fxyz[2+4*(j+kj+lj)] = exyz[2+4*(j+kj+lj)]*at1; */
               v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]);
               v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
               _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],v_zt2);
/* fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1;     */
/* fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; */
/* fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; */
               v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
               v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
               _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2);
/* fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1;     */
/* fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; */
/* fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; */
               v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
               v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
               _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;     */
/* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */
               v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
               v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
               _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
            }
/* loop over remaining elements */
            for (j = itn; j < nxh; j++) {
               at1 = cimagf(ffc[j+kk+ll]);
               fxyz[4*(j+kj+lj)] = exyz[4*(j+kj+lj)]*at1;
               fxyz[1+4*(j+kj+lj)] = exyz[1+4*(j+kj+lj)]*at1;
               fxyz[2+4*(j+kj+lj)] = exyz[2+4*(j+kj+lj)]*at1;
               fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1;
               fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1;
               fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1;
               fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1;
               fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1;
               fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1;
               fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;
               fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1;
               fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1;
            }
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
         for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+ll]); */
            v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
                    _mm512_int2mask(15),(float *)&ffc[j+ll]);
/* bug fix: high half must continue the ffc[j+ll] run; the original   */
/* read ffc[j+kk+8] with kk left over from the k loop above           */
            v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                    _mm512_int2mask(15),(float *)&ffc[j+ll+8]);
            v_at1 = _mm512_permute4f128_ps(v_at1,0);
            v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                    _mm512_int2mask(13260),(__m512i)v_at1,78);
            v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                    _mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+lj)] = exyz[4*(j+lj)]*at1;     */
/* fxyz[1+4*(j+lj)] = exyz[1+4*(j+lj)]*at1; */
/* fxyz[2+4*(j+lj)] = exyz[2+4*(j+lj)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+lj)]);
            v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
            _mm512_store_ps((float *)&fxyz[4*(j+lj)],v_zt2);
/* fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1;     */
/* fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; */
/* fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]);
            v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
            _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2);
/* fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1;     */
/* fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; */
/* fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
            v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
            _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;     */
/* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
            v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
            _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
         }
/* loop over remaining elements */
         for (j = itn; j < nxh; j++) {
            at1 = cimagf(ffc[j+ll]);
            fxyz[4*(j+lj)] = exyz[4*(j+lj)]*at1;
            fxyz[1+4*(j+lj)] = exyz[1+4*(j+lj)]*at1;
            fxyz[2+4*(j+lj)] = exyz[2+4*(j+lj)]*at1;
            fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1;
            fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1;
            fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1;
            fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1;
            fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1;
            fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1;
            fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;
            fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1;
            fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1;
         }
      }
/* mode numbers kz = 0, nz/2 */
      l1 = nxvyh*nzh;
      for (k = 1; k < nyh; k++) {
         kk = nxhd*k;
         kj = nxvh*k;
         k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 2 */
         for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j+kk]); */
            v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,
                    _mm512_int2mask(15),(float *)&ffc[j+kk]);
            v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                    _mm512_int2mask(15),(float *)&ffc[j+kk+8]);
            v_at1 = _mm512_permute4f128_ps(v_at1,0);
            v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                    _mm512_int2mask(13260),(__m512i)v_at1,78);
            v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                    _mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*(j+kj)] = exyz[4*(j+kj)]*at1;     */
/* fxyz[1+4*(j+kj)] = exyz[1+4*(j+kj)]*at1; */
/* fxyz[2+4*(j+kj)] = exyz[2+4*(j+kj)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj)]);
            v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
            _mm512_store_ps((float *)&fxyz[4*(j+kj)],v_zt2);
/* fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1;     */
/* fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; */
/* fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
            v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
            _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2);
/* fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1;     */
/* fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; */
/* fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]);
            v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
            _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;     */
/* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */
            v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
            v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
            _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
         }
/* loop over remaining elements */
         for (j = itn; j < nxh; j++) {
            at1 = cimagf(ffc[j+kk]);
            fxyz[4*(j+kj)] = exyz[4*(j+kj)]*at1;
            fxyz[1+4*(j+kj)] = exyz[1+4*(j+kj)]*at1;
            fxyz[2+4*(j+kj)] = exyz[2+4*(j+kj)]*at1;
            fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1;
            fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1;
            fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1;
            fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1;
            fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1;
            fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1;
            fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;
            fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1;
            fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1;
         }
      }
/* mode numbers ky = 0, ny/2 and kz = 0, nz/2 */
      k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
      for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(ffc[j]); */
         v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15),
                 (float *)&ffc[j]);
         v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,
                 _mm512_int2mask(15),(float *)&ffc[j+8]);
         v_at1 = _mm512_permute4f128_ps(v_at1,0);
         v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                 _mm512_int2mask(13260),(__m512i)v_at1,78);
         v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1,
                 _mm512_int2mask(21845),(__m512i)v_at1,177);
/* fxyz[4*j] = exyz[4*j]*at1;     */
/* fxyz[1+4*j] = exyz[1+4*j]*at1; */
/* fxyz[2+4*j] = exyz[2+4*j]*at1; */
         v_zt1 = _mm512_load_ps((float *)&exyz[4*j]);
         v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
         _mm512_store_ps((float *)&fxyz[4*j],v_zt2);
/* fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1;     */
/* fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; */
/* fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; */
         v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]);
         v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
         _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2);
/* fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1;     */
/* fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; */
/* fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; */
         v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]);
         v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
         _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2);
/* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;     */
/* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */
/* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */
         v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]);
         v_zt2 = _mm512_mul_ps(v_zt1,v_at1);
         _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2);
      }
/* loop over remaining elements */
      for (j = itn; j < nxh; j++) {
         at1 = cimagf(ffc[j]);
         fxyz[4*j] = exyz[4*j]*at1;
         fxyz[1+4*j] = exyz[1+4*j]*at1;
         fxyz[2+4*j] = exyz[2+4*j]*at1;
         fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1;
         fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1;
         fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1;
         fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1;
         fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1;
         fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1;
         fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1;
         fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1;
         fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rmxy(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nzi, int nzp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of a three dimensional real to
complex fast fourier transform and its inverse, for a subset of z,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, an inverse fourier transform in x and y is performed
f[i][m][n] = (1/nx*ny*nz)*sum(f[i][k][j]*exp(-sqrt(-1)*2pi*n*j/nx)*
exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, a forward fourier transform in x and y is performed
f[l][k][j] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*n*j/nx)*
exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nzi = initial z index used
nzp = number of z indices used
nxhd = first dimension of f
nyd,nzd = second and third dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd need to be a multiple of 8
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhyd;
int i, j, k, l, n, nn, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
int nss, nxhs, nxhhs, itn;
float ani;
float complex t1, t2, t3;
__m512i v_j, v_kmr, v_m, v_n, v_it;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani;
v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0);
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nzt = nzi + nzp - 1;
nxhyd = nxhd*nyd;
nxhs = 8*(nxh/8);
nxhhs = 8*(nxhh/8);
itn = 1 > nxhhs ? 1 : nxhhs;
v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0);
v_n = _mm512_set_epi32(1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
if (isign > 0)
goto L180;
/* inverse fourier transform */
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
nryb = nxhyz/ny;
nry = nxyz/ny;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,ani,t1,t2,t3, \
v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t1 = f[j1+joff];
f[j1+joff] = f[j+joff];
f[j+joff] = t1;
}
}
}
/* first transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 8*(ns/8);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nss; j+=8) {
/* t1 = sct[kmr*j]; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/* t2 = t1*f[j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[j+k2+joff] = f[j+k1+joff] - t2; */
v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/* f[j+k1+joff] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = sct[kmr*j];
t2 = t1*f[j+k2+joff];
f[j+k2+joff] = f[j+k1+joff] - t2;
f[j+k1+joff] += t2;
}
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxyz/nx;
ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
v_ani = _mm512_set1_ps(ani);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhhs; j+=8) {
/* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845),
v_zero,v_t3);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* t2 = conjf(f[nxh-j+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[nxh-j+joff-7]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[nxh-j+joff+1]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[j+joff] = ani*(t1 + t2); */
v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2));
/* f[nxh-j+joff] = ani*conjf(t1 - t2); */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
v_t4 = _mm512_mul_ps(v_ani,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[j+joff],
_mm512_int2mask(65532),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7],
_mm512_int2mask(16383),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1],
_mm512_int2mask(16383),v_t4);
}
else {
_mm512_store_ps((float *)&f[j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4);
_mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
t2 = conjf(f[nxh-j+joff]);
t1 = f[j+joff] + t2;
t2 = (f[j+joff] - t2)*t3;
f[j+joff] = ani*(t1 + t2);
f[nxh-j+joff] = ani*conjf(t1 - t2);
}
}
ani = 2.0*ani;
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
f[nxhh+joff] = ani*conjf(f[nxhh+joff]);
f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff]))
+ (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd*k1 + nn;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[i+k1]);
/* f[i+k1] = f[i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[i+joff]);
_mm512_store_ps((float *)&f[i+k1],v_t2);
/* f[i+joff] = t1; */
_mm512_store_ps((float *)&f[i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+k1];
f[i+k1] = f[i+joff];
f[i+joff] = t1;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd*(j + k1) + nn;
j2 = nxhd*(j + k2) + nn;
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+j2] = f[i+j1] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j2],v_t4);
/* f[i+j1] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+j2];
f[i+j2] = f[i+j1] - t2;
f[i+j1] += t2;
}
}
}
ns = ns2;
}
/* unscramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd*k;
k1 = nxhd*ny - joff + nn;
joff += nn;
t1 = f[k1];
f[k1] = 0.5*(cimagf(f[joff] + t1)
+ crealf(f[joff] - t1)*_Complex_I);
f[joff] = 0.5*(crealf(f[joff] + t1)
+ cimagf(f[joff] - t1)*_Complex_I);
}
}
return;
/* forward fourier transform */
L180: nryb = nxhyz/ny;
nry = nxyz/ny;
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,t1,t2,t3,v_it, \
v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd*k;
k1 = nxhd*ny - joff + nn;
joff += nn;
t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
f[k1] = conjf(f[joff] - t1);
f[joff] += t1;
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd*k1 + nn;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[i+k1]);
/* f[i+k1] = f[i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[i+joff]);
_mm512_store_ps((float *)&f[i+k1],v_t2);
/* f[i+joff] = t1; */
_mm512_store_ps((float *)&f[i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+k1];
f[i+k1] = f[i+joff];
f[i+joff] = t1;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd*(j + k1) + nn;
j2 = nxhd*(j + k2) + nn;
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+j2] = f[i+j1] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j2],v_t4);
/* f[i+j1] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+j2];
f[i+j2] = f[i+j1] - t2;
f[i+j1] += t2;
}
}
}
ns = ns2;
}
/* scramble coefficients */
kmr = nxyz/nx;
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhhs; j+=8) {
/* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* t2 = conjf(f[nxh-j+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[nxh-j+joff-7]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[nxh-j+joff+1]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[j+joff] = t1 + t2; */
v_t3 = _mm512_add_ps(v_t1,v_t2);
/* f[nxh-j+joff] = conjf(t1 - t2); */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[j+joff],
_mm512_int2mask(65532),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7],
_mm512_int2mask(16383),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1],
_mm512_int2mask(16383),v_t4);
}
else {
_mm512_store_ps((float *)&f[j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4);
_mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
t2 = conjf(f[nxh-j+joff]);
t1 = f[j+joff] + t2;
t2 = (f[j+joff] - t2)*t3;
f[j+joff] = t1 + t2;
f[nxh-j+joff] = conjf(t1 - t2);
}
}
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]);
f[joff] = (crealf(f[joff]) + cimagf(f[joff]))
+ (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I;
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t1 = f[j1+joff];
f[j1+joff] = f[j+joff];
f[j+joff] = t1;
}
}
}
/* finally transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 8*(ns/8);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nss; j+=8) {
/* t1 = conjf(sct[kmr*j]); */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690),
v_zero,v_t1);
/* t2 = t1*f[j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[j+k2+joff] = f[j+k1+joff] - t2; */
v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/* f[j+k1+joff] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = conjf(sct[kmr*j]);
t2 = t1*f[j+k2+joff];
f[j+k2+joff] = f[j+k1+joff] - t2;
f[j+k1+joff] += t2;
}
}
}
ns = ns2;
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rmz(float complex f[], int isign, int mixup[],
                 float complex sct[], int indx, int indy, int indz,
                 int nyi, int nyp, int nxhd, int nyd, int nzd,
                 int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of a three dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
   indx/indy/indz = exponent which determines length in x/y/z direction,
   where nx=2**indx, ny=2**indy, nz=2**indz
   if isign = -1, an inverse fourier transform in z is performed
   f[l][k][j] = sum(f[i][k][j]*exp(-sqrt(-1)*2pi*l*i/nz))
   if isign = 1, a forward fourier transform in z is performed
   f[i][m][n] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*l*i/nz))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = first dimension of f
   nyd,nzd = second and third dimensions of f
   nxhyzd = maximum of (nx/2,ny,nz)
   nxyzhd = maximum of (nx,ny,nz)/2
   fourier coefficients are stored as follows:
   f[l][k][j] = real, imaginary part of mode j,k,l
   where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
   f[l][k][0] = real, imaginary part of mode nx/2,k,l,
   where ny/2+1 <= k < ny and 0 <= l < nz, and
   f[l][0][0] = real, imaginary part of mode nx/2,0,l,
   f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l,
   where nz/2+1 <= l < nz, and
   imag(f[0][0][0]) = real part of mode nx/2,0,0
   imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
   imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
   imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
   using jpl storage convention, as described in:
   E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
   Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
   Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
   December 1993.
   requires KNC, f needs to be 64 byte aligned
   nxhd needs to be a multiple of 8
   written by viktor k. decyk, ucla
local data */
   int indx1, ndx1yz, nx, nxh, ny, nyh;
   int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhyd, ioff;
   int i, j, k, l, n, ll, j1, j2, k1, k2, l1, ns, ns2, km, kmr, i0, i1;
/* NOTE(review): nss is declared but never used in this function */
   int nss, nxhs;
   float complex t1, t2;
   __m512 v_zero, v_t1, v_t2, v_t3, v_t4;
   if (isign==0)
      return;
   indx1 = indx - 1;
   ndx1yz = indx1 > indy ? indx1 : indy;
   ndx1yz = ndx1yz > indz ? ndx1yz : indz;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nz = 1L<<indz;
   nzh = nz/2;
   nxyz = nx > ny ? nx : ny;
   nxyz = nxyz > nz ? nxyz : nz;
   nxhyz = 1L<<ndx1yz;
   nyt = nyi + nyp - 1;
   nxhyd = nxhd*nyd;
/* nxhs = length of the x run that can be processed 8 complex at a time */
   nxhs = 8*(nxh/8);
   v_zero = _mm512_setzero_ps();
   v_t1 = _mm512_setzero_ps();
   v_t2 = _mm512_setzero_ps();
   v_t3 = _mm512_setzero_ps();
   v_t4 = _mm512_setzero_ps();
   if (isign > 0)
      goto L90;
/* inverse fourier transform */
   nrzb = nxhyz/nz;
   nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
/* swap rows l and l1 only once (when l < l1) */
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
            for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/* f[i+i1] = f[i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[i+i0]);
               _mm512_store_ps((float *)&f[i+i1],v_t2);
/* f[i+i0] = t1; */
               _mm512_store_ps((float *)&f[i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[i+i1];
               f[i+i1] = f[i+i0];
               f[i+i0] = t1;
            }
         }
      }
/* finally transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
               t1 = sct[kmr*j];
/* broadcast twiddle t1 as (re,im) pairs across all 16 float lanes */
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                      crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
               for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+i1]; */
/* complex multiply on interleaved (re,im) lanes:                   */
/* shuffle 160 (0xA0) duplicates real parts, 245 (0xF5) duplicates  */
/* imaginary parts, 177 (0xB1) swaps re/im within each pair;        */
/* mask 21845 (0x5555, even lanes) negates the real half of the     */
/* cross term so the sum gives re*re-im*im and re*im+im*re          */
                  v_t2 = _mm512_load_ps((float *)&f[i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+i1] = f[i+i0] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i1],v_t4);
/* f[i+i0] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[i+i1];
                  f[i+i1] = f[i+i0] - t2;
                  f[i+i0] += t2;
               }
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* only the thread range owning k=0 (nyi==1) touches these modes */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         t1 = f[l1];
         f[l1] = 0.5*(cimagf(f[ll] + t1)
                 + crealf(f[ll] - t1)*_Complex_I);
         f[ll] = 0.5*(crealf(f[ll] + t1)
                 + cimagf(f[ll] - t1)*_Complex_I);
      }
   }
/* same unscrambling for the row k = ny/2, if it lies in [nyi,nyt] */
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd*nyh;
         i0 = i1 + ll;
         i1 += l1;
         t1 = f[i1];
         f[i1] = 0.5*(cimagf(f[i0] + t1)
                 + crealf(f[i0] - t1)*_Complex_I);
         f[i0] = 0.5*(crealf(f[i0] + t1)
                 + cimagf(f[i0] - t1)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L90: nrzb = nxhyz/nz;
   nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         t1 = cimagf(f[l1]) + crealf(f[l1])*_Complex_I;
         f[l1] = conjf(f[ll] - t1);
         f[ll] += t1;
      }
   }
/* same scrambling for the row k = ny/2, if it lies in [nyi,nyt] */
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd*nyh;
         i0 = i1 + ll;
         i1 += l1;
         t1 = cimagf(f[i1]) + crealf(f[i1])*_Complex_I;
         f[i1] = conjf(f[i0] - t1);
         f[i0] += t1;
      }
   }
/* bit-reverse array elements in z */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd*n;
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
/* swap rows l and l1 only once (when l < l1) */
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
            for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/* f[i+i1] = f[i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[i+i0]);
               _mm512_store_ps((float *)&f[i+i1],v_t2);
/* f[i+i0] = t1; */
               _mm512_store_ps((float *)&f[i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[i+i1];
               f[i+i1] = f[i+i0];
               f[i+i0] = t1;
            }
         }
      }
/* first transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
/* conjugate twiddle for the forward (isign = 1) transform */
               t1 = conjf(sct[kmr*j]);
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                      crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
               for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+i1]; */
/* same vectorized complex-multiply/butterfly as the inverse branch */
                  v_t2 = _mm512_load_ps((float *)&f[i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+i1] = f[i+i0] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i1],v_t4);
/* f[i+i0] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[i+i1];
                  f[i+i1] = f[i+i0] - t2;
                  f[i+i0] += t2;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rm3xy(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nzi, int nzp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of 3 three dimensional complex
to real fast fourier transforms and their inverses, for a subset of z,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, three inverse fourier transforms in x and y are
performed
f[i][m][n][0:2] = (1/nx*ny*nz)*sum(f[i][k][j][0:2]*
exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, three forward fourier transforms in x and y are
performed
f[l][k][j][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nzi = initial z index used
nzp = number of z indices used
nxhd = second dimension of f
nyd,nzd = third and fourth dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j][0:2] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd need to be a multiple of 2
f needs to have 4 components
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhd4, nxhyd;
int i, j, k, l, n, nn, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
int nss, nxhs, nxhhs, itn;
float at1, at2, ani;
float complex t1, t2, t3, t4;
__m512i v_j, v_kmr, v_m, v_n, v_l, v_it;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani, v_half;
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nzt = nzi + nzp - 1;
nxhd4 = 4*nxhd;
nxhyd = nxhd4*nyd;
nxhs = 2*(nxh/2);
nxhhs = 2*(nxhh/2);
itn = 1 > nxhhs ? 1 : nxhhs;
v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0);
v_n = _mm512_set_epi32(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
v_half = _mm512_set1_ps(0.5f);
if (isign > 0)
goto L230;
/* inverse fourier transform */
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
nryb = nxhyz/ny;
nry = nxyz/ny;
v_l = _mm512_set_epi32(15,11,14,10,13,9,12,8,7,3,6,2,5,1,4,0);
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \
ani,t1,t2,t3,t4,v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* swap complex components */
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(f[2+4*j+joff]); */
/* at2 = crealf(f[2+4*j+joff]); */
/* f[2+4*j+joff] = crealf(f[1+4*j+joff]) */
/* + crealf(f[3+4*j+joff])*_Complex_I; */
/* f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; */
/* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */
v_t1 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1);
_mm512_store_ps((float *)&f[4*j+joff],v_t1);
}
/* loop over remaining elements */
for (j = nxhs; j < nxh; j++) {
at1 = cimagf(f[2+4*j+joff]);
at2 = crealf(f[2+4*j+joff]);
f[2+4*j+joff] = crealf(f[1+4*j+joff])
+ crealf(f[3+4*j+joff])*_Complex_I;
f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I;
f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
}
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* t1 = f[4*j1+joff]; */
/* t2 = f[1+4*j1+joff]; */
/* t3 = f[2+4*j1+joff]; */
v_t1 = _mm512_mask_loadunpacklo_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff]);
v_t1 = _mm512_mask_loadunpackhi_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff+8]);
/* f[4*j1+joff] = f[4*j+joff]; */
/* f[1+4*j1+joff] = f[1+4*j+joff]; */
/* f[2+4*j1+joff] = f[2+4*j+joff]; */
v_t2 = _mm512_mask_loadunpacklo_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff]);
v_t2 = _mm512_mask_loadunpackhi_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff+8]);
_mm512_mask_packstorelo_ps((float *)&f[4*j1+joff],
_mm512_int2mask(255),v_t2);
_mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8],
_mm512_int2mask(255),v_t2);
/* f[4*j+joff] = t1; */
/* f[1+4*j+joff] = t2; */
/* f[2+4*j+joff] = t3; */
_mm512_mask_packstorelo_ps((float *)&f[4*j+joff],
_mm512_int2mask(255),v_t1);
_mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8],
_mm512_int2mask(255),v_t1);
}
}
}
/* first transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 2*(ns/2);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = 4*ns2*k;
k2 = k1 + 4*ns;
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nss; j+=2) {
/* t1 = sct[kmr*j]; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/* t2 = t1*f[4*j+k2+joff]; */
/* t3 = t1*f[1+4*j+k2+joff]; */
/* t4 = t1*f[2+4*j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */
/* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */
/* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k2+joff],v_t4);
/* f[4*j+k1+joff] += t2; */
/* f[1+4*j+k1+joff] += t3; */
/* f[2+4*j+k1+joff] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = sct[kmr*j];
t2 = t1*f[4*j+k2+joff];
t3 = t1*f[1+4*j+k2+joff];
t4 = t1*f[2+4*j+k2+joff];
f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
f[4*j+k1+joff] += t2;
f[1+4*j+k1+joff] += t3;
f[2+4*j+k1+joff] += t4;
}
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxyz/nx;
ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
v_ani = _mm512_set1_ps(ani);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhhs; j+=2) {
/* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845),
v_zero,v_t3);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* for (jj = 0; jj < 3; jj++) { */
/* t2 = conjf(f[jj+4*(nxh-j)+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff+8]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[jj+4*j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[jj+4*j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[jj+4*j+joff] = ani*(t1 + t2); */
v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2));
/* f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); */
/* } */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
v_t4 = _mm512_mul_ps(v_ani,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[4*j+joff],
_mm512_int2mask(65280),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],
_mm512_int2mask(255),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],
_mm512_int2mask(255),v_t4);
}
else {
_mm512_store_ps((float *)&f[4*j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4);
_mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
for (jj = 0; jj < 3; jj++) {
t2 = conjf(f[jj+4*(nxh-j)+joff]);
t1 = f[jj+4*j+joff] + t2;
t2 = (f[jj+4*j+joff] - t2)*t3;
f[jj+4*j+joff] = ani*(t1 + t2);
f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2);
}
}
}
/* ani = 2.0*ani; */
v_ani = _mm512_add_ps(v_ani,v_ani);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* for (jj = 0; jj < 3; jj++) { */
/* f[jj+4*nxhh+joff] = ani*conjf(f[jj+4*nxhh+joff]); */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[4*nxhh+joff]);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero,
v_t1);
v_t1 = _mm512_mul_ps(v_ani,v_t1);
_mm512_mask_store_ps((float *)&f[4*nxhh+joff],
_mm512_int2mask(63),v_t1);
/* f[jj+joff] = ani*((crealf(f[jj+joff]) */
/* + cimagf(f[jj+joff])) */
/* + (crealf(f[jj+joff]) */
/* - cimagf(f[jj+joff]))*_Complex_I); */
/* } */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2);
v_t3 = _mm512_mul_ps(v_ani,v_t3);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t3);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd4*k1 + nn;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+k1]; */
/* t2 = f[1+4*i+k1]; */
/* t3 = f[2+4*i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+k1]);
/* f[4*i+k1] = f[4*i+joff]; */
/* f[1+4*i+k1] = f[1+4*i+joff]; */
/* f[2+4*i+k1] = f[2+4*i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+joff]);
_mm512_store_ps((float *)&f[4*i+k1],v_t2);
/* f[4*i+joff] = t1; */
/* f[1+4*i+joff] = t2; */
/* f[2+4*i+joff] = t3; */
_mm512_store_ps((float *)&f[4*i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+k1];
t2 = f[1+4*i+k1];
t3 = f[2+4*i+k1];
f[4*i+k1] = f[4*i+joff];
f[1+4*i+k1] = f[1+4*i+joff];
f[2+4*i+k1] = f[2+4*i+joff];
f[4*i+joff] = t1;
f[1+4*i+joff] = t2;
f[2+4*i+joff] = t3;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd4*(j + k1) + nn;
j2 = nxhd4*(j + k2) + nn;
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+j2]; */
/* t3 = t1*f[1+4*i+j2]; */
/* t4 = t1*f[2+4*i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+j2] = f[4*i+j1] - t2; */
/* f[1+4*i+j2] = f[1+4*i+j1] - t3; */
/* f[2+4*i+j2] = f[2+4*i+j1] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j2],v_t4);
/* f[4*i+j1] += t2; */
/* f[1+4*i+j1] += t3; */
/* f[2+4*i+j1] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+j2];
t3 = t1*f[1+4*i+j2];
t4 = t1*f[2+4*i+j2];
f[4*i+j2] = f[4*i+j1] - t2;
f[1+4*i+j2] = f[1+4*i+j1] - t3;
f[2+4*i+j2] = f[2+4*i+j1] - t4;
f[4*i+j1] += t2;
f[1+4*i+j1] += t3;
f[2+4*i+j1] += t4;
}
}
}
ns = ns2;
}
/* unscramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd4*k;
k1 = nxhd4*ny - joff + nn;
joff += nn;
/* for (jj = 0; jj < 3; jj++) { */
/* t1 = f[jj+k1]; */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[k1]);
/* f[jj+k1] = 0.5*(cimagf(f[jj+joff] + t1) */
/* + crealf(f[jj+joff] - t1)*_Complex_I); */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(42),v_t2,v_t1);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21),v_t2,v_t1);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
v_t3 = _mm512_mul_ps(v_half,v_t3);
_mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3);
/* f[jj+joff] = 0.5*(crealf(f[jj+joff] + t1) */
/* + cimagf(f[jj+joff] - t1)*_Complex_I); */
/* } */
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t2,v_t1);
v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(21),v_t2,v_t1);
v_t2 = _mm512_mul_ps(v_half,v_t2);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),v_t2);
}
}
return;
/* forward fourier transform */
L230: nryb = nxhyz/ny;
nry = nxyz/ny;
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
v_l = _mm512_set_epi32(15,13,11,9,14,12,10,8,7,5,3,1,6,4,2,0);
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \
t1,t2,t3,t4,v_it,v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd4*k;
k1 = nxhd4*ny - joff + nn;
joff += nn;
/* for (jj = 0; jj < 3; jj++) { */
/* t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[k1]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,177);
/* f[jj+k1] = conjf(f[jj+joff] - t1); */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(63),v_t2,v_t1);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(42),
v_zero,v_t3);
_mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3);
/* f[jj+joff] += t1; */
/* } */
v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(63),v_t2,v_t1);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t2);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd4*k1 + nn;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+k1]; */
/* t2 = f[1+4*i+k1]; */
/* t3 = f[2+4*i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+k1]);
/* f[4*i+k1] = f[4*i+joff]; */
/* f[1+4*i+k1] = f[1+4*i+joff]; */
/* f[2+4*i+k1] = f[2+4*i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+joff]);
_mm512_store_ps((float *)&f[4*i+k1],v_t2);
/* f[4*i+joff] = t1; */
/* f[1+4*i+joff] = t2; */
/* f[2+4*i+joff] = t3; */
_mm512_store_ps((float *)&f[4*i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+k1];
t2 = f[1+4*i+k1];
t3 = f[2+4*i+k1];
f[4*i+k1] = f[4*i+joff];
f[1+4*i+k1] = f[1+4*i+joff];
f[2+4*i+k1] = f[2+4*i+joff];
f[4*i+joff] = t1;
f[1+4*i+joff] = t2;
f[2+4*i+joff] = t3;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd4*(j + k1) + nn;
j2 = nxhd4*(j + k2) + nn;
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+j2]; */
/* t3 = t1*f[1+4*i+j2]; */
/* t4 = t1*f[2+4*i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+j2] = f[4*i+j1] - t2; */
/* f[1+4*i+j2] = f[1+4*i+j1] - t3; */
/* f[2+4*i+j2] = f[2+4*i+j1] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j2],v_t4);
/* f[4*i+j1] += t2; */
/* f[1+4*i+j1] += t3; */
/* f[2+4*i+j1] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+j2];
t3 = t1*f[1+4*i+j2];
t4 = t1*f[2+4*i+j2];
f[4*i+j2] = f[4*i+j1] - t2;
f[1+4*i+j2] = f[1+4*i+j1] - t3;
f[2+4*i+j2] = f[2+4*i+j1] - t4;
f[4*i+j1] += t2;
f[1+4*i+j1] += t3;
f[2+4*i+j1] += t4;
}
}
}
ns = ns2;
}
/* scramble coefficients */
kmr = nxyz/nx;
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhhs; j+=2) {
/* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* for (jj = 0; jj < 3; jj++) { */
/* t2 = conjf(f[jj+4*(nxh-j)+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff+8]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[jj+4*j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[jj+4*j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[jj+4*j+joff] = t1 + t2; */
v_t3 = _mm512_add_ps(v_t1,v_t2);
/* f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); */
/* } */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[4*j+joff],
_mm512_int2mask(65280),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],
_mm512_int2mask(255),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],
_mm512_int2mask(255),v_t4);
}
else {
_mm512_store_ps((float *)&f[4*j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4);
_mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
for (jj = 0; jj < 3; jj++) {
t2 = conjf(f[jj+4*(nxh-j)+joff]);
t1 = f[jj+4*j+joff] + t2;
t2 = (f[jj+4*j+joff] - t2)*t3;
f[jj+4*j+joff] = t1 + t2;
f[jj+4*(nxh-j)+joff] = conjf(t1 - t2);
}
}
}
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* for (jj = 0; jj < 3; jj++) { */
/* f[jj+4*nxhh+joff] = 2.0*conjf(f[jj+4*nxhh+joff]); */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[4*nxhh+joff]);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero,
v_t1);
v_t1 = _mm512_add_ps(v_t1,v_t1);
_mm512_mask_store_ps((float *)&f[4*nxhh+joff],
_mm512_int2mask(63),v_t1);
/* f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) */
/* + (crealf(f[jj+joff]) */
/* - cimagf(f[jj+joff]))*_Complex_I; */
/* } */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t3);
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* t1 = f[4*j1+joff]; */
/* t2 = f[1+4*j1+joff]; */
/* t3 = f[2+4*j1+joff]; */
v_t1 = _mm512_mask_loadunpacklo_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff]);
v_t1 = _mm512_mask_loadunpackhi_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff+8]);
/* f[4*j1+joff] = f[4*j+joff]; */
/* f[1+4*j1+joff] = f[1+4*j+joff]; */
/* f[2+4*j1+joff] = f[2+4*j+joff]; */
v_t2 = _mm512_mask_loadunpacklo_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff]);
v_t2 = _mm512_mask_loadunpackhi_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff+8]);
_mm512_mask_packstorelo_ps((float *)&f[4*j1+joff],
_mm512_int2mask(255),v_t2);
_mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8],
_mm512_int2mask(255),v_t2);
/* f[4*j+joff] = t1; */
/* f[1+4*j+joff] = t2; */
/* f[2+4*j+joff] = t3; */
_mm512_mask_packstorelo_ps((float *)&f[4*j+joff],
_mm512_int2mask(255),v_t1);
_mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8],
_mm512_int2mask(255),v_t1);
}
}
}
/* finally transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 2*(ns/2);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = 4*ns2*k;
k2 = k1 + 4*ns;
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nss; j+=2) {
/* t1 = conjf(sct[kmr*j]); */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690),
v_zero,v_t1);
/* t2 = t1*f[4*j+k2+joff]; */
/* t3 = t1*f[1+4*j+k2+joff]; */
/* t4 = t1*f[2+4*j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */
/* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */
/* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k2+joff],v_t4);
/* f[4*j+k1+joff] += t2; */
/* f[1+4*j+k1+joff] += t3; */
/* f[2+4*j+k1+joff] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = conjf(sct[kmr*j]);
t2 = t1*f[4*j+k2+joff];
t3 = t1*f[1+4*j+k2+joff];
t4 = t1*f[2+4*j+k2+joff];
f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
f[4*j+k1+joff] += t2;
f[1+4*j+k1+joff] += t3;
f[2+4*j+k1+joff] += t4;
}
}
}
ns = ns2;
}
/* swap complex components */
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* f[3+4*j+joff] = cimagf(f[2+4*j+joff]) */
/* + cimagf(f[3+4*j+joff])*_Complex_I; */
/* at1 = crealf(f[2+4*j+joff]); */
/* f[2+4*j+joff] = cimagf(f[4*j+joff]) */
/* + cimagf(f[1+4*j+joff])*_Complex_I; */
/* at2 = crealf(f[1+4*j+joff]); */
/* f[1+4*j+joff] = at1 + 0.0*_Complex_I; */
/* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */
v_t1 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1);
_mm512_store_ps((float *)&f[4*j+joff],v_t1);
}
/* loop over remaining elements */
for (j = nxhs; j < nxh; j++) {
f[3+4*j+joff] = cimagf(f[2+4*j+joff])
+ cimagf(f[3+4*j+joff])*_Complex_I;
at1 = crealf(f[2+4*j+joff]);
f[2+4*j+joff] = cimagf(f[4*j+joff])
+ cimagf(f[1+4*j+joff])*_Complex_I;
at2 = crealf(f[1+4*j+joff]);
f[1+4*j+joff] = at1 + 0.0*_Complex_I;
f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
}
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rm3z(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nyi, int nyp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of 3 three dimensional complex to
real fast fourier transforms and their inverses, for a subset of y,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, three inverse fourier transforms in z are performed
f[l][k][j][0:2] = sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*l*i/nz))
if isign = 1, three forward fourier transforms in z are performed
f[i][m][n][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*l*i/nz))
mixup = array of bit reversed addresses
sct = sine/cosine table
nyi = initial y index used
nyp = number of y indices used
nxhd = second dimension of f
nyd,nzd = third and fourth dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j][0:2] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0][0:2], = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd need to be a multiple of 2
f needs to have 4 components
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, ny, nyh;
int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhd4, nxhyd, ioff;
int i, j, k, l, n, ll, jj, j1, j2, k1, k2, l1, ns, ns2, km, kmr;
int i0, i1;
int nxhs;
float complex t1, t2, t3, t4;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4;
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nzh = nz/2;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nyt = nyi + nyp - 1;
nxhd4 = 4*nxhd;
nxhyd = nxhd4*nyd;
nxhs = 2*(nxh/2);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
if (isign > 0)
goto L110;
/* inverse fourier transform */
nrzb = nxhyz/nz;
nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd4*n;
/* bit-reverse array elements in z */
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+i1]; */
/* t2 = f[1+4*i+i1]; */
/* t3 = f[2+4*i+i1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
/* f[4*i+i1] = f[4*i+i0]; */
/* f[1+4*i+i1] = f[1+4*i+i0]; */
/* f[2+4*i+i1] = f[2+4*i+i0]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
_mm512_store_ps((float *)&f[4*i+i1],v_t2);
/* f[4*i+i0] = t1; */
/* f[1+4*i+i0] = t2; */
/* f[2+4*i+i0] = t3; */
_mm512_store_ps((float *)&f[4*i+i0],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+i1];
t2 = f[1+4*i+i1];
t3 = f[2+4*i+i1];
f[4*i+i1] = f[4*i+i0];
f[1+4*i+i1] = f[1+4*i+i0];
f[2+4*i+i1] = f[2+4*i+i0];
f[4*i+i0] = t1;
f[1+4*i+i0] = t2;
f[2+4*i+i0] = t3;
}
}
}
/* finally transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
i0 = ioff + j1;
i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+i1]; */
/* t3 = t1*f[1+4*i+i1]; */
/* t4 = t1*f[2+4*i+i1]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+i1] = f[4*i+i0] - t2; */
/* f[1+4*i+i1] = f[1+4*i+i0] - t3; */
/* f[2+4*i+i1] = f[2+4*i+i0] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+i1],v_t4);
/* f[4*i+i0] += t2; */
/* f[1+4*i+i0] += t3; */
/* f[2+4*i+i0] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+i0],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+i1];
t3 = t1*f[1+4*i+i1];
t4 = t1*f[2+4*i+i1];
f[4*i+i1] = f[4*i+i0] - t2;
f[1+4*i+i1] = f[1+4*i+i0] - t3;
f[2+4*i+i1] = f[2+4*i+i0] - t4;
f[4*i+i0] += t2;
f[1+4*i+i0] += t3;
f[2+4*i+i0] += t4;
}
}
}
ns = ns2;
}
}
/* unscramble modes kx = 0, nx/2 */
if (nyi==1) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
for (jj = 0; jj < 3; jj++) {
t1 = f[jj+l1];
f[jj+l1] = 0.5*(cimagf(f[jj+ll] + t1)
+ crealf(f[jj+ll] - t1)*_Complex_I);
f[jj+ll] = 0.5*(crealf(f[jj+ll] + t1)
+ cimagf(f[jj+ll] - t1)*_Complex_I);
}
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
i1 = nxhd4*nyh;
i0 = i1 + ll;
i1 += l1;
for (jj = 0; jj < 3; jj++) {
t1 = f[jj+i1];
f[jj+i1] = 0.5*(cimagf(f[jj+i0] + t1)
+ crealf(f[jj+i0] - t1)*_Complex_I);
f[jj+i0] = 0.5*(crealf(f[jj+i0] + t1)
+ cimagf(f[jj+i0] - t1)*_Complex_I);
}
}
}
return;
/* forward fourier transform */
L110: nrzb = nxhyz/nz;
nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
if (nyi==1) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
for (jj = 0; jj < 3; jj++) {
t1 = cimagf(f[jj+l1]) + crealf(f[jj+l1])*_Complex_I;
f[jj+l1] = conjf(f[jj+ll] - t1);
f[jj+ll] += t1;
}
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
i1 = nxhd4*nyh;
i0 = i1 + ll;
i1 += l1;
for (jj = 0; jj < 3; jj++) {
t1 = cimagf(f[jj+i1]) + crealf(f[jj+i1])*_Complex_I;
f[jj+i1] = conjf(f[jj+i0] - t1);
f[jj+i0] += t1;
}
}
}
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd4*n;
/* bit-reverse array elements in z */
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+i1]; */
/* t2 = f[1+4*i+i1]; */
/* t3 = f[2+4*i+i1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
/* f[4*i+i1] = f[4*i+i0]; */
/* f[1+4*i+i1] = f[1+4*i+i0]; */
/* f[2+4*i+i1] = f[2+4*i+i0]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
_mm512_store_ps((float *)&f[4*i+i1],v_t2);
/* f[4*i+i0] = t1; */
/* f[1+4*i+i0] = t2; */
/* f[2+4*i+i0] = t3; */
_mm512_store_ps((float *)&f[4*i+i0],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+i1];
t2 = f[1+4*i+i1];
t3 = f[2+4*i+i1];
f[4*i+i1] = f[4*i+i0];
f[1+4*i+i1] = f[1+4*i+i0];
f[2+4*i+i1] = f[2+4*i+i0];
f[4*i+i0] = t1;
f[1+4*i+i0] = t2;
f[2+4*i+i0] = t3;
}
}
}
/* first transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
i0 = ioff + j1;
i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+i1]; */
/* t3 = t1*f[1+4*i+i1]; */
/* t4 = t1*f[2+4*i+i1]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+i1] = f[4*i+i0] - t2; */
/* f[1+4*i+i1] = f[1+4*i+i0] - t3; */
/* f[2+4*i+i1] = f[2+4*i+i0] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+i1],v_t4);
/* f[4*i+i0] += t2; */
/* f[1+4*i+i0] += t3; */
/* f[2+4*i+i0] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+i0],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+i1];
t3 = t1*f[1+4*i+i1];
t4 = t1*f[2+4*i+i1];
f[4*i+i1] = f[4*i+i0] - t2;
f[1+4*i+i1] = f[1+4*i+i0] - t3;
f[2+4*i+i1] = f[2+4*i+i0] - t4;
f[4*i+i0] += t2;
f[1+4*i+i0] += t3;
f[2+4*i+i0] += t4;
}
}
}
ns = ns2;
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rmx(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for real to complex fft, with packed data */
/* local data */
int ny, nz;
static int nyi = 1, nzi = 1;
/* calculate range of indices */
ny = 1L<<indy;
nz = 1L<<indz;
/* inverse fourier transform */
if (isign < 0) {
/* perform xy fft */
ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform z fft */
ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
/* forward fourier transform */
else if (isign > 0) {
/* perform z fft */
ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform xy fft */
ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rm3(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for 3 2d real to complex ffts, with packed data */
/* local data */
int ny, nz;
static int nyi = 1, nzi = 1;
/* calculate range of indices */
ny = 1L<<indy;
nz = 1L<<indz;
/* inverse fourier transform */
if (isign < 0) {
/* perform xy fft */
ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,
nzd,nxhyzd,nxyzhd);
/* perform z fft */
ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
}
/* forward fourier transform */
else if (isign > 0) {
/* perform z fft */
ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
nxhyzd,nxyzhd);
/* perform xy fft */
ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,
nzd,nxhyzd,nxyzhd);
}
return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
void ckncgbppush3lt_(float *ppart, float *fxyz, float *bxyz ,int *kpic,
float *qbm, float *dt, float *dtc, float *ek,
int *idimp, int *nppmx, int *nx, int *ny, int *nz,
int *mx, int *my, int *mz, int *nxv, int *nyv,
int *nzv, int *mx1, int *my1, int *mxyz1,
int *ipbc) {
ckncgbppush3lt(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,
*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,
*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgbppushf3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic,
int *ncl, int *ihole, float *qbm, float *dt,
float *dtc, float *ek, int *idimp, int *nppmx,
int *nx, int *ny, int *nz, int *mx, int *my,
int *mz, int *nxv, int *nyv, int *nzv, int *mx1,
int *my1, int *mxyz1, int *ntmax, int *irc) {
ckncgbppushf3lt(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,
*idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,
*mx1,*my1,*mxyz1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgrbppush3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic,
float *qbm, float *dt, float *dtc, float *ci,
float *ek, int *idimp, int *nppmx, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1, int *ipbc) {
ckncgrbppush3lt(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,
*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,
*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgrbppushf3lt_(float *ppart, float *fxyz, float *bxyz,
int *kpic, int *ncl, int *ihole, float *qbm,
float *dt, float *dtc, float *ci, float *ek,
int *idimp, int *nppmx, int *nx, int *ny,
int *nz, int *mx, int *my, int *mz, int *nxv,
int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1, int *ntmax, int *irc) {
ckncgrbppushf3lt(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek,
*idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,
*nzv,*mx1,*my1,*mxyz1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgppost3lt_(float *ppart, float *q, int *kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1) {
ckncgppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,
*nzv,*mx1,*my1,*mxyz1);
return;
}
/*--------------------------------------------------------------------*/
void cknc2gppost3lt_(float *ppart, float *q, int *kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1) {
cknc2gppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,
*nzv,*mx1,*my1,*mxyz1);
return;
}
/*--------------------------------------------------------------------*/
void ckncgjppost3lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, int *nppmx, int *idimp, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz,
int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
int *mxyz1, int *ipbc) {
ckncgjppost3lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*nz,*mx,
*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncgrjppost3lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, float *ci, int *nppmx, int *idimp,
int *nx, int *ny, int *nz, int *mx, int *my,
int *mz, int *nxv, int *nyv, int *nzv, int *mx1,
int *my1, int *mxyz1, int *ipbc) {
ckncgrjppost3lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*nz,
*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void ckncpporder3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *nx,
int *ny, int *nz, int *mx, int *my, int *mz,
int *mx1, int *my1, int *mz1, int *npbmx,
int *ntmax, int *irc) {
ckncpporder3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*nz,
*mx,*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void ckncpporderf3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *mx1,
int *my1, int *mz1, int *npbmx, int *ntmax,
int *irc) {
ckncpporderf3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
*mz1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cknccguard3l_(float *fxyz, int *nx, int *ny, int *nz, int *nxe,
int *nye, int *nze) {
cknccguard3l(fxyz,*nx,*ny,*nz,*nxe,*nye,*nze);
return;
}
/*--------------------------------------------------------------------*/
void ckncacguard3l_(float *cu, int *nx, int *ny, int *nz, int *nxe,
int *nye, int *nze) {
ckncacguard3l(cu,*nx,*ny,*nz,*nxe,*nye,*nze);
return;
}
/*--------------------------------------------------------------------*/
void ckncaguard3l_(float *q, int *nx, int *ny, int *nz, int *nxe,
int *nye, int *nze) {
ckncaguard3l(q,*nx,*ny,*nz,*nxe,*nye,*nze);
return;
}
/*--------------------------------------------------------------------*/
void ckncmpois33_(float complex *q, float complex *fxyz, int *isign,
float complex *ffc, float *ax, float *ay, float *az,
float *affp, float *we, int *nx, int *ny, int *nz,
int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd,
int *nzhd) {
ckncmpois33(q,fxyz,*isign,ffc,*ax,*ay,*az,*affp,we,*nx,*ny,*nz,*nxvh,
*nyv,*nzv,*nxhd,*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void cknccuperp3_(float complex *cu, int *nx, int *ny, int *nz,
int *nxvh, int *nyv, int *nzv) {
cknccuperp3(cu,*nx,*ny,*nz,*nxvh,*nyv,*nzv);
return;
}
/*--------------------------------------------------------------------*/
void ckncibpois33_(float complex *cu, float complex *bxyz,
float complex *ffc, float *ci, float *wm, int *nx,
int *ny, int *nz, int *nxvh, int *nyv, int *nzv,
int *nxhd, int *nyhd, int *nzhd) {
ckncibpois33(cu,bxyz,ffc,*ci,wm,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd,
*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncmaxwel3_(float complex *exyz, float complex *bxyz,
float complex *cu, float complex *ffc, float *ci,
float *dt, float *wf, float *wm, int *nx, int *ny,
int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd,
int *nyhd, int *nzhd) {
ckncmaxwel3(exyz,bxyz,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nz,*nxvh,*nyv,
*nzv,*nxhd,*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncemfield3_(float complex *fxyz, float complex *exyz,
float complex *ffc, int *isign, int *nx, int *ny,
int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd,
int *nyhd, int *nzhd) {
ckncemfield3(fxyz,exyz,ffc,*isign,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd,
*nyhd,*nzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rmx_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *indz,
int *nxhd, int *nyd, int *nzd, int *nxhyzd,
int *nxyzhd) {
ckncwfft3rmx(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
*nxhyzd,*nxyzhd);
return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rm3_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *indz,
int *nxhd, int *nyd, int *nzd, int *nxhyzd,
int *nxyzhd) {
ckncwfft3rm3(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
*nxhyzd,*nxyzhd);
return;
}
|
GB_unop__isnan_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__isnan_bool_fc64
// op(A') function: GB_unop_tran__isnan_bool_fc64
// C type: bool
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = GB_cisnan (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisnan (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = GB_cisnan (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = isnan (Ax [p]) for all p: elementwise apply of the ISNAN unary op,
// casting each GxB_FC64_t entry and writing a bool result. Cx and Ax may be
// aliased; the loop is embarrassingly parallel.
GrB_Info GB_unop_apply__isnan_bool_fc64
(
    bool *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cast (identity here), then apply the op
        GxB_FC64_t zval = (Ax [p]) ;
        Cx [p] = GB_cisnan (zval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isnan (A'): transpose A, cast each entry, and apply the ISNAN op.
// The actual loops live in the shared template "GB_unop_transpose.c",
// specialized by the GB_* macros defined above in this file.
GrB_Info GB_unop_tran__isnan_bool_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this op/type combination was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
// run phase 2 of the transpose template: fill C from A using the op
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__expm1_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__expm1_fp32_fp32)
// op(A') function: GB (_unop_tran__expm1_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = expm1f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = expm1f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = expm1f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = expm1f (Ax [p]) for all entries: elementwise apply of EXPM1 over
// float values. Handles both the dense/sparse case (Ab == NULL) and the
// bitmap case (skip positions where Ab [p] is zero). Cx and Ax may be aliased.
GrB_Info GB (_unop_apply__expm1_fp32_fp32)
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = expm1f (Ax [k]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = expm1f (Ax [k]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = expm1 (A'): transpose A and apply the EXPM1 op to each float entry.
// The loops come from the shared template "GB_unop_transpose.c",
// specialized by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__expm1_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this op/type combination was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
m_image.h | /*======================================================================
Maratis Tiny C Library
version 1.0
------------------------------------------------------------------------
Copyright (c) 2015 Anael Seghezzi <www.maratis3d.org>
Copyright (c) 2015 Marti Maria Saguer
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would
be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not
be misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
========================================================================*/
/*
Image manipulation :
- transformation (re-frame, mirror, rotation)
- conversions (float, half, ubyte, linear, greyscale...)
- filtering (convolution, Gaussian blur, Harris)
- scaling (pyramid, generic, bilinear)
- morphology (flood-fill, dilate, erode, thinning)
- edge and corner detection (Sobel, Harris)
to create the implementation,
#define M_IMAGE_IMPLEMENTATION
in *one* C/CPP file that includes this file.
optional:
include after *m_math.h*
//////////////////////////////////////////////////////
Example: create a 256x256 float image with 1 component:
struct m_image foo1 = M_IMAGE_IDENTITY();
struct m_image foo2 = M_IMAGE_IDENTITY();
int x, y;
m_image_create(&foo1, M_FLOAT, 256, 256, 1);
memset(foo1.data, 0, foo1.size * sizeof(float)); // clear to zero
y = 128; x = 128;
((float *)foo1.data)[y * foo1.width + x] = 1.0f; // set (x, y) pixel to one
m_image_gaussian_blur(&foo2, &foo1, 3, 3); // apply Gaussian blur
m_image_destroy(&foo2);
m_image_destroy(&foo1);
*/
#ifndef M_IMAGE_H
#define M_IMAGE_H
#include <stdint.h>
#define M_IMAGE_VERSION 1
#ifdef __cplusplus
extern "C" {
#endif
#ifndef MIAPI
#define MIAPI extern
#endif
#define M_VOID 0
#define M_BOOL 1
#define M_BYTE 2
#define M_UBYTE 3
#define M_SHORT 4
#define M_USHORT 5
#define M_INT 6
#define M_UINT 7
#define M_HALF 8
#define M_FLOAT 9
#define M_DOUBLE 10
struct m_image
{
void *data;
int size;
int width;
int height;
int comp;
char type;
};
/* identity, must be used before calling m_image_create */
#define M_IMAGE_IDENTITY() {0, 0, 0, 0, 0, 0}
/* m_image type util */
MIAPI int m_type_sizeof(char type);
/* fully supported types are: M_UBYTE, M_USHORT, M_HALF, M_FLOAT
partially supported types: M_BYTE, M_SHORT, M_INT, M_UINT (no support for conversion) */
MIAPI void m_image_create(struct m_image *image, char type, int width, int height, int comp);
MIAPI void m_image_destroy(struct m_image *image);
MIAPI void m_image_ubyte_to_float(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_ushort_to_float(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_half_to_float(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_float_to_ubyte(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_float_to_ushort(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_float_to_half(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_copy(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_copy_sub_image(struct m_image *dest, const struct m_image *src, int x, int y, int w, int h);
MIAPI void m_image_reframe_zero(struct m_image *dest, const struct m_image *src, int left, int top, int right, int bottom);
MIAPI void m_image_reframe(struct m_image *dest, const struct m_image *src, int left, int top, int right, int bottom);
MIAPI void m_image_extract_component(struct m_image *dest, const struct m_image *src, int c);
MIAPI void m_image_rotate_left(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_rotate_right(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_rotate_180(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_mirror_x(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_mirror_y(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_premultiply(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_unpremultiply(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_sRGB_to_linear(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_linear_to_sRGB(struct m_image *dest, const struct m_image *src);
/* float/half conversion */
MIAPI float m_half2float(uint16_t h);
MIAPI uint16_t m_float2half(float flt);
/* raw processing */
MIAPI void m_sRGB_to_linear(float *dest, const float *src, int size);
MIAPI void m_linear_to_sRGB(float *dest, const float *src, int size);
MIAPI void m_RGB_to_HSV(float *dest, const float *src);
MIAPI void m_HSV_to_RGB(float *dest, const float *src);
MIAPI void m_RGB_to_HSL(float *dest, const float *src);
MIAPI void m_HSL_to_RGB(float *dest, const float *src);
MIAPI void m_gaussian_kernel(float *dest, int size, float radius);
MIAPI void m_sst(float *dest, const float *src, int count);
MIAPI void m_harris_response(float *dest, const float *src, int count);
MIAPI void m_tfm(float *dest, const float *src, int count);
MIAPI void m_normalize(float *dest, const float *src, int size); /* dest = src / norm(src) */
MIAPI void m_normalize_sum(float *dest, const float *src, int size); /* dest = src / sum(src) */
MIAPI float m_mean(const float *src, int size);
MIAPI float m_squared_distance(const float *src1, const float *src2, int size);
MIAPI float m_squared_distance_dispatch(const float *src1, const float *src2, int size);
MIAPI float m_convolution(const float *src1, const float *src2, int size); /* a dot product really */
MIAPI float m_chi_squared_distance(const float *src1, const float *src2, int size); /* good at estimating signed histogram differences */
/* conversion to 1 component (float image only) */
MIAPI void m_image_grey(struct m_image *dest, const struct m_image *src); /* from RGB src */
MIAPI void m_image_max(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_max_abs(struct m_image *dest, const struct m_image *src);
/* summed area table (also called "integral image") */
MIAPI void m_image_summed_area(struct m_image *dest, const struct m_image *src);
/* convolutions (float image only) */
/* if alpha channel, src image must be pre-multiplied */
MIAPI void m_image_convolution_h_raw(struct m_image *dest, const struct m_image *src, float *kernel, int size);
MIAPI void m_image_convolution_v_raw(struct m_image *dest, const struct m_image *src, float *kernel, int size);
MIAPI void m_image_convolution_h(struct m_image *dest, const struct m_image *src, float *kernel, int size); /* horizontal */
MIAPI void m_image_convolution_v(struct m_image *dest, const struct m_image *src, float *kernel, int size); /* vertical */
MIAPI void m_image_gaussian_blur(struct m_image *dest, const struct m_image *src, float dx, float dy);
/* edge and corner (float 1 component image only) */
MIAPI void m_image_sobel(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_harris(struct m_image *dest, const struct m_image *src, float radius);
/* morphology (ubyte 1 component image only) */
MIAPI int m_image_floodfill_4x(struct m_image *dest, int x, int y, uint8_t ref, uint8_t value, uint16_t *stack, int stack_size);
MIAPI int m_image_floodfill_8x(struct m_image *dest, int x, int y, uint8_t ref, uint8_t value, uint16_t *stack, int stack_size);
MIAPI void m_image_dilate(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_erode(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_edge_4x(struct m_image *dest, const struct m_image *src, uint8_t ref);
MIAPI void m_image_thin(struct m_image *dest);
/* non maxima suppression (float image only) */
MIAPI void m_image_non_max_supp(struct m_image *dest, const struct m_image *src, int radius, float threshold);
/* detect Harris corners
margin: margin around the image to exclude corners
radius: maxima radius
threshold: Harris response threshold
corners: corners coordinates of size max_count * 2
max_count: maximum number of corners
return corner count */
MIAPI int m_image_corner_harris(const struct m_image *src, int margin, float radius, float threshold, int *corners, int max_count);
/* resizing (float image only) */
MIAPI void m_image_sub_pixel(const struct m_image *src, float x, float y, float *result);
MIAPI void m_image_pyrdown(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_resize(struct m_image *dest, const struct m_image *src, int new_width, int new_height);
#ifdef __cplusplus
}
#endif
/*
----------------------------------------------------------------------*/
#endif /* M_IMAGE_H */
#ifdef M_IMAGE_IMPLEMENTATION
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <math.h>
#include <float.h>
#include <assert.h>
#ifndef M_SAFE_FREE
#define M_SAFE_FREE(p) {if (p) {free(p); (p) = NULL;}}
#endif
#ifndef M_MIN
#define M_MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef M_MAX
#define M_MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef M_ABS
#define M_ABS(a) (((a) < 0) ? -(a) : (a))
#endif
#ifndef M_CLAMP
#define M_CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))
#endif
MIAPI void m_linear_to_sRGB(float *dest, const float *src, int size)
{
int i;
for (i = 0; i < size; i++) {
if (*src < 0.0031308f)
*dest = 12.92f * (*src);
else
*dest = (1.0f + 0.055f) * powf(*src, 1.0f/2.4f) - 0.055f;
dest++;
src++;
}
}
MIAPI void m_sRGB_to_linear(float *dest, const float *src, int size)
{
int i;
for (i = 0; i < size; i++) {
if (*src <= 0.03928f)
*dest = *src / 12.92f;
else
*dest = powf((*src + 0.055f) / 1.055f, 2.4f);
dest++;
src++;
}
}
/* Convert one RGB triplet to HSV.
   dest gets {h, s, v}: h in degrees [0, 360), s and v scaled like the input.
   For achromatic or black input, h and s are 0. */
MIAPI void m_RGB_to_HSV(float *dest, const float *src)
{
   float r = src[0], g = src[1], b = src[2];
   float lo = M_MIN(r, M_MIN(g, b));
   float hi = M_MAX(r, M_MAX(g, b));
   float delta = hi - lo;
   float h = 0, s = 0;
   float v = hi;
   if (delta != 0 && hi != 0) {
      s = delta / hi;
      /* hue sector depends on which channel is the maximum */
      if (hi == r)
         h = (g - b) / delta;
      else if (hi == g)
         h = 2 + (b - r) / delta;
      else
         h = 4 + (r - g) / delta;
      h *= 60;
      if (h < 0) h += 360;
   }
   dest[0] = h; dest[1] = s; dest[2] = v;
}
MIAPI void m_HSV_to_RGB(float *dest, const float *src)
{
float r, g, b;
float f, p, q, t;
float h = src[0];
float s = src[1];
float v = src[2];
int i;
if (s == 0) {
dest[0] = v; dest[1] = v; dest[2] = v;
return;
}
h /= 60.0f;
i = (int)floorf(h);
f = h - i;
p = v * (1 - s);
q = v * (1 - s * f);
t = v * (1 - s * (1 - f));
switch (i) {
case 0:
r = v; g = t; b = p;
break;
case 1:
r = q; g = v; b = p;
break;
case 2:
r = p; g = v; b = t;
break;
case 3:
r = p; g = q; b = v;
break;
case 4:
r = t; g = p; b = v;
break;
default:
r = v; g = p; b = q;
break;
}
dest[0] = r; dest[1] = g; dest[2] = b;
}
MIAPI void m_RGB_to_HSL(float *dest, const float *src)
{
float h, s, l, dr, dg, db;
float r = src[0];
float g = src[1];
float b = src[2];
float min = r;
float max = r;
float delta;
min = M_MIN(min, g);
min = M_MIN(min, b);
max = M_MAX(max, g);
max = M_MAX(max, b);
delta = max - min;
h = 0;
s = 0;
l = (max + min) * 0.5f;
if (max == 0) {
dest[0] = h; dest[1] = s; dest[2] = l;
return;
}
if(r == max)
h = fmodf(((g - b) / delta), 6.0f);
else if(g == max)
h = ((b - r) / delta) + 2.0f;
else
h = ((r - g) / delta) + 4.0f;
h *= 60.0f;
if (h < 0) h += 360;
s = delta / (1.0f - fabsf(2.0f * l - 1.0f));
dest[0] = h;
dest[1] = s;
dest[2] = l;
}
MIAPI void m_HSL_to_RGB(float *dest, const float *src)
{
float h = src[0];
float s = src[1];
float l = src[2];
float c, m, x;
if (s == 0) {
dest[0] = l; dest[1] = l; dest[2] = l;
return;
}
c = (1.0f - fabsf(2.0f * l - 1.0f)) * s;
m = 1.0f * (l - 0.5f * c);
x = c * (1.0f - fabsf(fmodf(h / 60.0f, 2) - 1.0f));
if (h >= 0.0f && h < 60.0f) {
dest[0] = c + m;
dest[1] = x + m;
dest[2] = m;
}
else if (h >= 60.0f && h < 120.0f) {
dest[0] = x + m;
dest[1] = c + m;
dest[2] = m;
}
else if (h < 120.0f && h < 180.0f) {
dest[0] = m;
dest[1] = c + m;
dest[2] = x + m;
}
else if (h >= 180.0f && h < 240.0f) {
dest[0] = m;
dest[1] = x + m;
dest[2] = c + m;
}
else if (h >= 240.0f && h < 300.0f) {
dest[0] = x + m;
dest[1] = m;
dest[2] = c + m;
}
else if (h >= 300.0f && h < 360.0f) {
dest[0] = c + m;
dest[1] = m;
dest[2] = x + m;
}
else {
dest[0] = m;
dest[1] = m;
dest[2] = m;
}
}
MIAPI void m_gaussian_kernel(float *dest, int size, float radius)
{
float *k;
float rs, s2, sum;
float sigma = 1.6f;
float tetha = 2.25f;
int r, hsize = size / 2;
s2 = 1.0f / expf(sigma * sigma * tetha);
rs = sigma / radius;
k = dest;
sum = 0.0f;
/* compute gaussian kernel */
for (r = -hsize; r <= hsize; r++) {
float x = r * rs;
float v = (1.0f / expf(x * x)) - s2;
v = M_MAX(v, 0);
*k = v;
sum += v;
k++;
}
/* normalize */
if (sum > 0.0f) {
float isum = 1.0f / sum;
for (r = 0; r < size; r++)
dest[r] *= isum;
}
}
/* Build a structure tensor per element: src holds 2 floats per element
   (dx, dy); dest receives 3 floats per element (dx*dx, dy*dy, dx*dy). */
MIAPI void m_sst(float *dest, const float *src, int count)
{
   int i;
   for (i = 0; i < count; i++) {
      float gx = src[i * 2 + 0];
      float gy = src[i * 2 + 1];
      dest[i * 3 + 0] = gx * gx;
      dest[i * 3 + 1] = gy * gy;
      dest[i * 3 + 2] = gx * gy;
   }
}
/* Harris corner response per element: src holds 3 floats per element
   (dx2, dy2, dxy); dest receives det / (trace + eps), one float each. */
MIAPI void m_harris_response(float *dest, const float *src, int count)
{
   int i;
   for (i = 0; i < count; i++) {
      float xx = src[i * 3 + 0];
      float yy = src[i * 3 + 1];
      float xy = src[i * 3 + 2];
      /* epsilon avoids division by zero on flat regions */
      dest[i] = (xx * yy - xy * xy) / (xx + yy + 1e-8f);
   }
}
MIAPI void m_tfm(float *dest, const float *src, int count)
{
int i;
for (i = 0; i < count; i++) {
if (src[0] < src[1]) {
float dx2 = src[0];
float dy2 = src[1];
float dxy = src[2];
float sqd = (dy2 * dy2) - (2.0f * dx2 * dy2) + (dx2 * dx2) + (4.0f * dxy * dxy);
float lambda = 0.5f * (dy2 + dx2 + sqrtf(M_MAX(0, sqd)));
dest[0] = dx2 - lambda;
dest[1] = dxy;
}
else {
float dy2 = src[0];
float dx2 = src[1];
float dxy = src[2];
float sqd = (dy2 * dy2) - (2.0f * dx2 * dy2) + (dx2 * dx2) + (4.0f * dxy * dxy);
float lambda = 0.5f * (dy2 + dx2 + sqrtf(M_MAX(0, sqd)));
dest[0] = dxy;
dest[1] = dx2 - lambda;
}
src += 3;
dest += 2;
}
}
/* Chi-squared distance between two arrays:
   0.5 * sum over i of (src2[i] - src1[i])^2 / (src1[i] + src2[i]),
   skipping bins whose sum is not positive. */
MIAPI float m_chi_squared_distance(const float *src1, const float *src2, int size)
{
   float acc = 0;
   int i;
   for (i = 0; i < size; i++) {
      float a = src1[i];
      float b = src2[i];
      float denom = a + b;
      if (denom > 0) {   /* avoid division by zero on empty bins */
         float d = b - a;
         acc += (d * d) / denom;
      }
   }
   return acc * 0.5f;
}
/* Dot product of two float arrays of the given size. */
MIAPI float m_convolution(const float *src1, const float *src2, int size)
{
   float acc = 0;
   int i;
   for (i = 0; i < size; i++) {
      acc += src1[i] * src2[i];
   }
   return acc;
}
MIAPI void m_normalize(float *dest, const float *src, int size)
{
float sum = 0.0f; int i;
for(i = 0; i < size; i++)
sum += src[i] * src[i];
if (sum > 0.0f) {
sum = 1.0f / sqrtf(sum);
for(i = 0; i < size; i++)
dest[i] = src[i] * sum;
}
else if (dest != src) {
memset(dest, 0, size * sizeof(float));
}
}
/* dest = src / sum(src). If the sum is not positive, dest is cleared. */
MIAPI void m_normalize_sum(float *dest, const float *src, int size)
{
   int i;
   float total = 0.0f;
   for (i = 0; i < size; i++)
      total += src[i];
   if (total > 0.0f) {
      float inv = 1.0f / total;
      for (i = 0; i < size; i++)
         dest[i] = src[i] * inv;
   }
   else {
      memset(dest, 0, size * sizeof(float));
   }
}
/* Arithmetic mean of a float array (size must be non-zero). */
MIAPI float m_mean(const float *src, int size)
{
   float total = 0;
   int i;
   for (i = 0; i < size; i++)
      total += src[i];
   return total / size;
}
/* Squared Euclidean distance between two float arrays:
   sum over i of (src2[i] - src1[i])^2. */
MIAPI float m_squared_distance(const float *src1, const float *src2, int size)
{
   float acc = 0.0f;
   int i;
   for (i = 0; i < size; i++) {
      float d = src2[i] - src1[i];
      acc += d * d;
   }
   return acc;
}
/* m_half2float / m_float2half :
a big thanks to Marti Maria Saguer for allowing the use of this code
under the zlib license from "Little Color Management System" (cmshalf.c) */
/* This code is inspired in the paper "Fast Half Float Conversions"
by Jeroen van der Zijp */
static uint32_t m__mantissa[2048] = {
0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34a00000,
0x34c00000, 0x34e00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000,
0x35400000, 0x35500000, 0x35600000, 0x35700000, 0x35800000, 0x35880000,
0x35900000, 0x35980000, 0x35a00000, 0x35a80000, 0x35b00000, 0x35b80000,
0x35c00000, 0x35c80000, 0x35d00000, 0x35d80000, 0x35e00000, 0x35e80000,
0x35f00000, 0x35f80000, 0x36000000, 0x36040000, 0x36080000, 0x360c0000,
0x36100000, 0x36140000, 0x36180000, 0x361c0000, 0x36200000, 0x36240000,
0x36280000, 0x362c0000, 0x36300000, 0x36340000, 0x36380000, 0x363c0000,
0x36400000, 0x36440000, 0x36480000, 0x364c0000, 0x36500000, 0x36540000,
0x36580000, 0x365c0000, 0x36600000, 0x36640000, 0x36680000, 0x366c0000,
0x36700000, 0x36740000, 0x36780000, 0x367c0000, 0x36800000, 0x36820000,
0x36840000, 0x36860000, 0x36880000, 0x368a0000, 0x368c0000, 0x368e0000,
0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369a0000,
0x369c0000, 0x369e0000, 0x36a00000, 0x36a20000, 0x36a40000, 0x36a60000,
0x36a80000, 0x36aa0000, 0x36ac0000, 0x36ae0000, 0x36b00000, 0x36b20000,
0x36b40000, 0x36b60000, 0x36b80000, 0x36ba0000, 0x36bc0000, 0x36be0000,
0x36c00000, 0x36c20000, 0x36c40000, 0x36c60000, 0x36c80000, 0x36ca0000,
0x36cc0000, 0x36ce0000, 0x36d00000, 0x36d20000, 0x36d40000, 0x36d60000,
0x36d80000, 0x36da0000, 0x36dc0000, 0x36de0000, 0x36e00000, 0x36e20000,
0x36e40000, 0x36e60000, 0x36e80000, 0x36ea0000, 0x36ec0000, 0x36ee0000,
0x36f00000, 0x36f20000, 0x36f40000, 0x36f60000, 0x36f80000, 0x36fa0000,
0x36fc0000, 0x36fe0000, 0x37000000, 0x37010000, 0x37020000, 0x37030000,
0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000,
0x370a0000, 0x370b0000, 0x370c0000, 0x370d0000, 0x370e0000, 0x370f0000,
0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000,
0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371a0000, 0x371b0000,
0x371c0000, 0x371d0000, 0x371e0000, 0x371f0000, 0x37200000, 0x37210000,
0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000,
0x37280000, 0x37290000, 0x372a0000, 0x372b0000, 0x372c0000, 0x372d0000,
0x372e0000, 0x372f0000, 0x37300000, 0x37310000, 0x37320000, 0x37330000,
0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000,
0x373a0000, 0x373b0000, 0x373c0000, 0x373d0000, 0x373e0000, 0x373f0000,
0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000,
0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374a0000, 0x374b0000,
0x374c0000, 0x374d0000, 0x374e0000, 0x374f0000, 0x37500000, 0x37510000,
0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000,
0x37580000, 0x37590000, 0x375a0000, 0x375b0000, 0x375c0000, 0x375d0000,
0x375e0000, 0x375f0000, 0x37600000, 0x37610000, 0x37620000, 0x37630000,
0x37640000, 0x37650000, 0x37660000, 0x37670000, 0x37680000, 0x37690000,
0x376a0000, 0x376b0000, 0x376c0000, 0x376d0000, 0x376e0000, 0x376f0000,
0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000,
0x37760000, 0x37770000, 0x37780000, 0x37790000, 0x377a0000, 0x377b0000,
0x377c0000, 0x377d0000, 0x377e0000, 0x377f0000, 0x37800000, 0x37808000,
0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000,
0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000,
0x37870000, 0x37878000, 0x37880000, 0x37888000, 0x37890000, 0x37898000,
0x378a0000, 0x378a8000, 0x378b0000, 0x378b8000, 0x378c0000, 0x378c8000,
0x378d0000, 0x378d8000, 0x378e0000, 0x378e8000, 0x378f0000, 0x378f8000,
0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000,
0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000,
0x37960000, 0x37968000, 0x37970000, 0x37978000, 0x37980000, 0x37988000,
0x37990000, 0x37998000, 0x379a0000, 0x379a8000, 0x379b0000, 0x379b8000,
0x379c0000, 0x379c8000, 0x379d0000, 0x379d8000, 0x379e0000, 0x379e8000,
0x379f0000, 0x379f8000, 0x37a00000, 0x37a08000, 0x37a10000, 0x37a18000,
0x37a20000, 0x37a28000, 0x37a30000, 0x37a38000, 0x37a40000, 0x37a48000,
0x37a50000, 0x37a58000, 0x37a60000, 0x37a68000, 0x37a70000, 0x37a78000,
0x37a80000, 0x37a88000, 0x37a90000, 0x37a98000, 0x37aa0000, 0x37aa8000,
0x37ab0000, 0x37ab8000, 0x37ac0000, 0x37ac8000, 0x37ad0000, 0x37ad8000,
0x37ae0000, 0x37ae8000, 0x37af0000, 0x37af8000, 0x37b00000, 0x37b08000,
0x37b10000, 0x37b18000, 0x37b20000, 0x37b28000, 0x37b30000, 0x37b38000,
0x37b40000, 0x37b48000, 0x37b50000, 0x37b58000, 0x37b60000, 0x37b68000,
0x37b70000, 0x37b78000, 0x37b80000, 0x37b88000, 0x37b90000, 0x37b98000,
0x37ba0000, 0x37ba8000, 0x37bb0000, 0x37bb8000, 0x37bc0000, 0x37bc8000,
0x37bd0000, 0x37bd8000, 0x37be0000, 0x37be8000, 0x37bf0000, 0x37bf8000,
0x37c00000, 0x37c08000, 0x37c10000, 0x37c18000, 0x37c20000, 0x37c28000,
0x37c30000, 0x37c38000, 0x37c40000, 0x37c48000, 0x37c50000, 0x37c58000,
0x37c60000, 0x37c68000, 0x37c70000, 0x37c78000, 0x37c80000, 0x37c88000,
0x37c90000, 0x37c98000, 0x37ca0000, 0x37ca8000, 0x37cb0000, 0x37cb8000,
0x37cc0000, 0x37cc8000, 0x37cd0000, 0x37cd8000, 0x37ce0000, 0x37ce8000,
0x37cf0000, 0x37cf8000, 0x37d00000, 0x37d08000, 0x37d10000, 0x37d18000,
0x37d20000, 0x37d28000, 0x37d30000, 0x37d38000, 0x37d40000, 0x37d48000,
0x37d50000, 0x37d58000, 0x37d60000, 0x37d68000, 0x37d70000, 0x37d78000,
0x37d80000, 0x37d88000, 0x37d90000, 0x37d98000, 0x37da0000, 0x37da8000,
0x37db0000, 0x37db8000, 0x37dc0000, 0x37dc8000, 0x37dd0000, 0x37dd8000,
0x37de0000, 0x37de8000, 0x37df0000, 0x37df8000, 0x37e00000, 0x37e08000,
0x37e10000, 0x37e18000, 0x37e20000, 0x37e28000, 0x37e30000, 0x37e38000,
0x37e40000, 0x37e48000, 0x37e50000, 0x37e58000, 0x37e60000, 0x37e68000,
0x37e70000, 0x37e78000, 0x37e80000, 0x37e88000, 0x37e90000, 0x37e98000,
0x37ea0000, 0x37ea8000, 0x37eb0000, 0x37eb8000, 0x37ec0000, 0x37ec8000,
0x37ed0000, 0x37ed8000, 0x37ee0000, 0x37ee8000, 0x37ef0000, 0x37ef8000,
0x37f00000, 0x37f08000, 0x37f10000, 0x37f18000, 0x37f20000, 0x37f28000,
0x37f30000, 0x37f38000, 0x37f40000, 0x37f48000, 0x37f50000, 0x37f58000,
0x37f60000, 0x37f68000, 0x37f70000, 0x37f78000, 0x37f80000, 0x37f88000,
0x37f90000, 0x37f98000, 0x37fa0000, 0x37fa8000, 0x37fb0000, 0x37fb8000,
0x37fc0000, 0x37fc8000, 0x37fd0000, 0x37fd8000, 0x37fe0000, 0x37fe8000,
0x37ff0000, 0x37ff8000, 0x38000000, 0x38004000, 0x38008000, 0x3800c000,
0x38010000, 0x38014000, 0x38018000, 0x3801c000, 0x38020000, 0x38024000,
0x38028000, 0x3802c000, 0x38030000, 0x38034000, 0x38038000, 0x3803c000,
0x38040000, 0x38044000, 0x38048000, 0x3804c000, 0x38050000, 0x38054000,
0x38058000, 0x3805c000, 0x38060000, 0x38064000, 0x38068000, 0x3806c000,
0x38070000, 0x38074000, 0x38078000, 0x3807c000, 0x38080000, 0x38084000,
0x38088000, 0x3808c000, 0x38090000, 0x38094000, 0x38098000, 0x3809c000,
0x380a0000, 0x380a4000, 0x380a8000, 0x380ac000, 0x380b0000, 0x380b4000,
0x380b8000, 0x380bc000, 0x380c0000, 0x380c4000, 0x380c8000, 0x380cc000,
0x380d0000, 0x380d4000, 0x380d8000, 0x380dc000, 0x380e0000, 0x380e4000,
0x380e8000, 0x380ec000, 0x380f0000, 0x380f4000, 0x380f8000, 0x380fc000,
0x38100000, 0x38104000, 0x38108000, 0x3810c000, 0x38110000, 0x38114000,
0x38118000, 0x3811c000, 0x38120000, 0x38124000, 0x38128000, 0x3812c000,
0x38130000, 0x38134000, 0x38138000, 0x3813c000, 0x38140000, 0x38144000,
0x38148000, 0x3814c000, 0x38150000, 0x38154000, 0x38158000, 0x3815c000,
0x38160000, 0x38164000, 0x38168000, 0x3816c000, 0x38170000, 0x38174000,
0x38178000, 0x3817c000, 0x38180000, 0x38184000, 0x38188000, 0x3818c000,
0x38190000, 0x38194000, 0x38198000, 0x3819c000, 0x381a0000, 0x381a4000,
0x381a8000, 0x381ac000, 0x381b0000, 0x381b4000, 0x381b8000, 0x381bc000,
0x381c0000, 0x381c4000, 0x381c8000, 0x381cc000, 0x381d0000, 0x381d4000,
0x381d8000, 0x381dc000, 0x381e0000, 0x381e4000, 0x381e8000, 0x381ec000,
0x381f0000, 0x381f4000, 0x381f8000, 0x381fc000, 0x38200000, 0x38204000,
0x38208000, 0x3820c000, 0x38210000, 0x38214000, 0x38218000, 0x3821c000,
0x38220000, 0x38224000, 0x38228000, 0x3822c000, 0x38230000, 0x38234000,
0x38238000, 0x3823c000, 0x38240000, 0x38244000, 0x38248000, 0x3824c000,
0x38250000, 0x38254000, 0x38258000, 0x3825c000, 0x38260000, 0x38264000,
0x38268000, 0x3826c000, 0x38270000, 0x38274000, 0x38278000, 0x3827c000,
0x38280000, 0x38284000, 0x38288000, 0x3828c000, 0x38290000, 0x38294000,
0x38298000, 0x3829c000, 0x382a0000, 0x382a4000, 0x382a8000, 0x382ac000,
0x382b0000, 0x382b4000, 0x382b8000, 0x382bc000, 0x382c0000, 0x382c4000,
0x382c8000, 0x382cc000, 0x382d0000, 0x382d4000, 0x382d8000, 0x382dc000,
0x382e0000, 0x382e4000, 0x382e8000, 0x382ec000, 0x382f0000, 0x382f4000,
0x382f8000, 0x382fc000, 0x38300000, 0x38304000, 0x38308000, 0x3830c000,
0x38310000, 0x38314000, 0x38318000, 0x3831c000, 0x38320000, 0x38324000,
0x38328000, 0x3832c000, 0x38330000, 0x38334000, 0x38338000, 0x3833c000,
0x38340000, 0x38344000, 0x38348000, 0x3834c000, 0x38350000, 0x38354000,
0x38358000, 0x3835c000, 0x38360000, 0x38364000, 0x38368000, 0x3836c000,
0x38370000, 0x38374000, 0x38378000, 0x3837c000, 0x38380000, 0x38384000,
0x38388000, 0x3838c000, 0x38390000, 0x38394000, 0x38398000, 0x3839c000,
0x383a0000, 0x383a4000, 0x383a8000, 0x383ac000, 0x383b0000, 0x383b4000,
0x383b8000, 0x383bc000, 0x383c0000, 0x383c4000, 0x383c8000, 0x383cc000,
0x383d0000, 0x383d4000, 0x383d8000, 0x383dc000, 0x383e0000, 0x383e4000,
0x383e8000, 0x383ec000, 0x383f0000, 0x383f4000, 0x383f8000, 0x383fc000,
0x38400000, 0x38404000, 0x38408000, 0x3840c000, 0x38410000, 0x38414000,
0x38418000, 0x3841c000, 0x38420000, 0x38424000, 0x38428000, 0x3842c000,
0x38430000, 0x38434000, 0x38438000, 0x3843c000, 0x38440000, 0x38444000,
0x38448000, 0x3844c000, 0x38450000, 0x38454000, 0x38458000, 0x3845c000,
0x38460000, 0x38464000, 0x38468000, 0x3846c000, 0x38470000, 0x38474000,
0x38478000, 0x3847c000, 0x38480000, 0x38484000, 0x38488000, 0x3848c000,
0x38490000, 0x38494000, 0x38498000, 0x3849c000, 0x384a0000, 0x384a4000,
0x384a8000, 0x384ac000, 0x384b0000, 0x384b4000, 0x384b8000, 0x384bc000,
0x384c0000, 0x384c4000, 0x384c8000, 0x384cc000, 0x384d0000, 0x384d4000,
0x384d8000, 0x384dc000, 0x384e0000, 0x384e4000, 0x384e8000, 0x384ec000,
0x384f0000, 0x384f4000, 0x384f8000, 0x384fc000, 0x38500000, 0x38504000,
0x38508000, 0x3850c000, 0x38510000, 0x38514000, 0x38518000, 0x3851c000,
0x38520000, 0x38524000, 0x38528000, 0x3852c000, 0x38530000, 0x38534000,
0x38538000, 0x3853c000, 0x38540000, 0x38544000, 0x38548000, 0x3854c000,
0x38550000, 0x38554000, 0x38558000, 0x3855c000, 0x38560000, 0x38564000,
0x38568000, 0x3856c000, 0x38570000, 0x38574000, 0x38578000, 0x3857c000,
0x38580000, 0x38584000, 0x38588000, 0x3858c000, 0x38590000, 0x38594000,
0x38598000, 0x3859c000, 0x385a0000, 0x385a4000, 0x385a8000, 0x385ac000,
0x385b0000, 0x385b4000, 0x385b8000, 0x385bc000, 0x385c0000, 0x385c4000,
0x385c8000, 0x385cc000, 0x385d0000, 0x385d4000, 0x385d8000, 0x385dc000,
0x385e0000, 0x385e4000, 0x385e8000, 0x385ec000, 0x385f0000, 0x385f4000,
0x385f8000, 0x385fc000, 0x38600000, 0x38604000, 0x38608000, 0x3860c000,
0x38610000, 0x38614000, 0x38618000, 0x3861c000, 0x38620000, 0x38624000,
0x38628000, 0x3862c000, 0x38630000, 0x38634000, 0x38638000, 0x3863c000,
0x38640000, 0x38644000, 0x38648000, 0x3864c000, 0x38650000, 0x38654000,
0x38658000, 0x3865c000, 0x38660000, 0x38664000, 0x38668000, 0x3866c000,
0x38670000, 0x38674000, 0x38678000, 0x3867c000, 0x38680000, 0x38684000,
0x38688000, 0x3868c000, 0x38690000, 0x38694000, 0x38698000, 0x3869c000,
0x386a0000, 0x386a4000, 0x386a8000, 0x386ac000, 0x386b0000, 0x386b4000,
0x386b8000, 0x386bc000, 0x386c0000, 0x386c4000, 0x386c8000, 0x386cc000,
0x386d0000, 0x386d4000, 0x386d8000, 0x386dc000, 0x386e0000, 0x386e4000,
0x386e8000, 0x386ec000, 0x386f0000, 0x386f4000, 0x386f8000, 0x386fc000,
0x38700000, 0x38704000, 0x38708000, 0x3870c000, 0x38710000, 0x38714000,
0x38718000, 0x3871c000, 0x38720000, 0x38724000, 0x38728000, 0x3872c000,
0x38730000, 0x38734000, 0x38738000, 0x3873c000, 0x38740000, 0x38744000,
0x38748000, 0x3874c000, 0x38750000, 0x38754000, 0x38758000, 0x3875c000,
0x38760000, 0x38764000, 0x38768000, 0x3876c000, 0x38770000, 0x38774000,
0x38778000, 0x3877c000, 0x38780000, 0x38784000, 0x38788000, 0x3878c000,
0x38790000, 0x38794000, 0x38798000, 0x3879c000, 0x387a0000, 0x387a4000,
0x387a8000, 0x387ac000, 0x387b0000, 0x387b4000, 0x387b8000, 0x387bc000,
0x387c0000, 0x387c4000, 0x387c8000, 0x387cc000, 0x387d0000, 0x387d4000,
0x387d8000, 0x387dc000, 0x387e0000, 0x387e4000, 0x387e8000, 0x387ec000,
0x387f0000, 0x387f4000, 0x387f8000, 0x387fc000, 0x38000000, 0x38002000,
0x38004000, 0x38006000, 0x38008000, 0x3800a000, 0x3800c000, 0x3800e000,
0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801a000,
0x3801c000, 0x3801e000, 0x38020000, 0x38022000, 0x38024000, 0x38026000,
0x38028000, 0x3802a000, 0x3802c000, 0x3802e000, 0x38030000, 0x38032000,
0x38034000, 0x38036000, 0x38038000, 0x3803a000, 0x3803c000, 0x3803e000,
0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804a000,
0x3804c000, 0x3804e000, 0x38050000, 0x38052000, 0x38054000, 0x38056000,
0x38058000, 0x3805a000, 0x3805c000, 0x3805e000, 0x38060000, 0x38062000,
0x38064000, 0x38066000, 0x38068000, 0x3806a000, 0x3806c000, 0x3806e000,
0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807a000,
0x3807c000, 0x3807e000, 0x38080000, 0x38082000, 0x38084000, 0x38086000,
0x38088000, 0x3808a000, 0x3808c000, 0x3808e000, 0x38090000, 0x38092000,
0x38094000, 0x38096000, 0x38098000, 0x3809a000, 0x3809c000, 0x3809e000,
0x380a0000, 0x380a2000, 0x380a4000, 0x380a6000, 0x380a8000, 0x380aa000,
0x380ac000, 0x380ae000, 0x380b0000, 0x380b2000, 0x380b4000, 0x380b6000,
0x380b8000, 0x380ba000, 0x380bc000, 0x380be000, 0x380c0000, 0x380c2000,
0x380c4000, 0x380c6000, 0x380c8000, 0x380ca000, 0x380cc000, 0x380ce000,
0x380d0000, 0x380d2000, 0x380d4000, 0x380d6000, 0x380d8000, 0x380da000,
0x380dc000, 0x380de000, 0x380e0000, 0x380e2000, 0x380e4000, 0x380e6000,
0x380e8000, 0x380ea000, 0x380ec000, 0x380ee000, 0x380f0000, 0x380f2000,
0x380f4000, 0x380f6000, 0x380f8000, 0x380fa000, 0x380fc000, 0x380fe000,
0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810a000,
0x3810c000, 0x3810e000, 0x38110000, 0x38112000, 0x38114000, 0x38116000,
0x38118000, 0x3811a000, 0x3811c000, 0x3811e000, 0x38120000, 0x38122000,
0x38124000, 0x38126000, 0x38128000, 0x3812a000, 0x3812c000, 0x3812e000,
0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813a000,
0x3813c000, 0x3813e000, 0x38140000, 0x38142000, 0x38144000, 0x38146000,
0x38148000, 0x3814a000, 0x3814c000, 0x3814e000, 0x38150000, 0x38152000,
0x38154000, 0x38156000, 0x38158000, 0x3815a000, 0x3815c000, 0x3815e000,
0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816a000,
0x3816c000, 0x3816e000, 0x38170000, 0x38172000, 0x38174000, 0x38176000,
0x38178000, 0x3817a000, 0x3817c000, 0x3817e000, 0x38180000, 0x38182000,
0x38184000, 0x38186000, 0x38188000, 0x3818a000, 0x3818c000, 0x3818e000,
0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819a000,
0x3819c000, 0x3819e000, 0x381a0000, 0x381a2000, 0x381a4000, 0x381a6000,
0x381a8000, 0x381aa000, 0x381ac000, 0x381ae000, 0x381b0000, 0x381b2000,
0x381b4000, 0x381b6000, 0x381b8000, 0x381ba000, 0x381bc000, 0x381be000,
0x381c0000, 0x381c2000, 0x381c4000, 0x381c6000, 0x381c8000, 0x381ca000,
0x381cc000, 0x381ce000, 0x381d0000, 0x381d2000, 0x381d4000, 0x381d6000,
0x381d8000, 0x381da000, 0x381dc000, 0x381de000, 0x381e0000, 0x381e2000,
0x381e4000, 0x381e6000, 0x381e8000, 0x381ea000, 0x381ec000, 0x381ee000,
0x381f0000, 0x381f2000, 0x381f4000, 0x381f6000, 0x381f8000, 0x381fa000,
0x381fc000, 0x381fe000, 0x38200000, 0x38202000, 0x38204000, 0x38206000,
0x38208000, 0x3820a000, 0x3820c000, 0x3820e000, 0x38210000, 0x38212000,
0x38214000, 0x38216000, 0x38218000, 0x3821a000, 0x3821c000, 0x3821e000,
0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822a000,
0x3822c000, 0x3822e000, 0x38230000, 0x38232000, 0x38234000, 0x38236000,
0x38238000, 0x3823a000, 0x3823c000, 0x3823e000, 0x38240000, 0x38242000,
0x38244000, 0x38246000, 0x38248000, 0x3824a000, 0x3824c000, 0x3824e000,
0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825a000,
0x3825c000, 0x3825e000, 0x38260000, 0x38262000, 0x38264000, 0x38266000,
0x38268000, 0x3826a000, 0x3826c000, 0x3826e000, 0x38270000, 0x38272000,
0x38274000, 0x38276000, 0x38278000, 0x3827a000, 0x3827c000, 0x3827e000,
0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828a000,
0x3828c000, 0x3828e000, 0x38290000, 0x38292000, 0x38294000, 0x38296000,
0x38298000, 0x3829a000, 0x3829c000, 0x3829e000, 0x382a0000, 0x382a2000,
0x382a4000, 0x382a6000, 0x382a8000, 0x382aa000, 0x382ac000, 0x382ae000,
0x382b0000, 0x382b2000, 0x382b4000, 0x382b6000, 0x382b8000, 0x382ba000,
0x382bc000, 0x382be000, 0x382c0000, 0x382c2000, 0x382c4000, 0x382c6000,
0x382c8000, 0x382ca000, 0x382cc000, 0x382ce000, 0x382d0000, 0x382d2000,
0x382d4000, 0x382d6000, 0x382d8000, 0x382da000, 0x382dc000, 0x382de000,
0x382e0000, 0x382e2000, 0x382e4000, 0x382e6000, 0x382e8000, 0x382ea000,
0x382ec000, 0x382ee000, 0x382f0000, 0x382f2000, 0x382f4000, 0x382f6000,
0x382f8000, 0x382fa000, 0x382fc000, 0x382fe000, 0x38300000, 0x38302000,
0x38304000, 0x38306000, 0x38308000, 0x3830a000, 0x3830c000, 0x3830e000,
0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831a000,
0x3831c000, 0x3831e000, 0x38320000, 0x38322000, 0x38324000, 0x38326000,
0x38328000, 0x3832a000, 0x3832c000, 0x3832e000, 0x38330000, 0x38332000,
0x38334000, 0x38336000, 0x38338000, 0x3833a000, 0x3833c000, 0x3833e000,
0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834a000,
0x3834c000, 0x3834e000, 0x38350000, 0x38352000, 0x38354000, 0x38356000,
0x38358000, 0x3835a000, 0x3835c000, 0x3835e000, 0x38360000, 0x38362000,
0x38364000, 0x38366000, 0x38368000, 0x3836a000, 0x3836c000, 0x3836e000,
0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837a000,
0x3837c000, 0x3837e000, 0x38380000, 0x38382000, 0x38384000, 0x38386000,
0x38388000, 0x3838a000, 0x3838c000, 0x3838e000, 0x38390000, 0x38392000,
0x38394000, 0x38396000, 0x38398000, 0x3839a000, 0x3839c000, 0x3839e000,
0x383a0000, 0x383a2000, 0x383a4000, 0x383a6000, 0x383a8000, 0x383aa000,
0x383ac000, 0x383ae000, 0x383b0000, 0x383b2000, 0x383b4000, 0x383b6000,
0x383b8000, 0x383ba000, 0x383bc000, 0x383be000, 0x383c0000, 0x383c2000,
0x383c4000, 0x383c6000, 0x383c8000, 0x383ca000, 0x383cc000, 0x383ce000,
0x383d0000, 0x383d2000, 0x383d4000, 0x383d6000, 0x383d8000, 0x383da000,
0x383dc000, 0x383de000, 0x383e0000, 0x383e2000, 0x383e4000, 0x383e6000,
0x383e8000, 0x383ea000, 0x383ec000, 0x383ee000, 0x383f0000, 0x383f2000,
0x383f4000, 0x383f6000, 0x383f8000, 0x383fa000, 0x383fc000, 0x383fe000,
0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840a000,
0x3840c000, 0x3840e000, 0x38410000, 0x38412000, 0x38414000, 0x38416000,
0x38418000, 0x3841a000, 0x3841c000, 0x3841e000, 0x38420000, 0x38422000,
0x38424000, 0x38426000, 0x38428000, 0x3842a000, 0x3842c000, 0x3842e000,
0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843a000,
0x3843c000, 0x3843e000, 0x38440000, 0x38442000, 0x38444000, 0x38446000,
0x38448000, 0x3844a000, 0x3844c000, 0x3844e000, 0x38450000, 0x38452000,
0x38454000, 0x38456000, 0x38458000, 0x3845a000, 0x3845c000, 0x3845e000,
0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846a000,
0x3846c000, 0x3846e000, 0x38470000, 0x38472000, 0x38474000, 0x38476000,
0x38478000, 0x3847a000, 0x3847c000, 0x3847e000, 0x38480000, 0x38482000,
0x38484000, 0x38486000, 0x38488000, 0x3848a000, 0x3848c000, 0x3848e000,
0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849a000,
0x3849c000, 0x3849e000, 0x384a0000, 0x384a2000, 0x384a4000, 0x384a6000,
0x384a8000, 0x384aa000, 0x384ac000, 0x384ae000, 0x384b0000, 0x384b2000,
0x384b4000, 0x384b6000, 0x384b8000, 0x384ba000, 0x384bc000, 0x384be000,
0x384c0000, 0x384c2000, 0x384c4000, 0x384c6000, 0x384c8000, 0x384ca000,
0x384cc000, 0x384ce000, 0x384d0000, 0x384d2000, 0x384d4000, 0x384d6000,
0x384d8000, 0x384da000, 0x384dc000, 0x384de000, 0x384e0000, 0x384e2000,
0x384e4000, 0x384e6000, 0x384e8000, 0x384ea000, 0x384ec000, 0x384ee000,
0x384f0000, 0x384f2000, 0x384f4000, 0x384f6000, 0x384f8000, 0x384fa000,
0x384fc000, 0x384fe000, 0x38500000, 0x38502000, 0x38504000, 0x38506000,
0x38508000, 0x3850a000, 0x3850c000, 0x3850e000, 0x38510000, 0x38512000,
0x38514000, 0x38516000, 0x38518000, 0x3851a000, 0x3851c000, 0x3851e000,
0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852a000,
0x3852c000, 0x3852e000, 0x38530000, 0x38532000, 0x38534000, 0x38536000,
0x38538000, 0x3853a000, 0x3853c000, 0x3853e000, 0x38540000, 0x38542000,
0x38544000, 0x38546000, 0x38548000, 0x3854a000, 0x3854c000, 0x3854e000,
0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855a000,
0x3855c000, 0x3855e000, 0x38560000, 0x38562000, 0x38564000, 0x38566000,
0x38568000, 0x3856a000, 0x3856c000, 0x3856e000, 0x38570000, 0x38572000,
0x38574000, 0x38576000, 0x38578000, 0x3857a000, 0x3857c000, 0x3857e000,
0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858a000,
0x3858c000, 0x3858e000, 0x38590000, 0x38592000, 0x38594000, 0x38596000,
0x38598000, 0x3859a000, 0x3859c000, 0x3859e000, 0x385a0000, 0x385a2000,
0x385a4000, 0x385a6000, 0x385a8000, 0x385aa000, 0x385ac000, 0x385ae000,
0x385b0000, 0x385b2000, 0x385b4000, 0x385b6000, 0x385b8000, 0x385ba000,
0x385bc000, 0x385be000, 0x385c0000, 0x385c2000, 0x385c4000, 0x385c6000,
0x385c8000, 0x385ca000, 0x385cc000, 0x385ce000, 0x385d0000, 0x385d2000,
0x385d4000, 0x385d6000, 0x385d8000, 0x385da000, 0x385dc000, 0x385de000,
0x385e0000, 0x385e2000, 0x385e4000, 0x385e6000, 0x385e8000, 0x385ea000,
0x385ec000, 0x385ee000, 0x385f0000, 0x385f2000, 0x385f4000, 0x385f6000,
0x385f8000, 0x385fa000, 0x385fc000, 0x385fe000, 0x38600000, 0x38602000,
0x38604000, 0x38606000, 0x38608000, 0x3860a000, 0x3860c000, 0x3860e000,
0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861a000,
0x3861c000, 0x3861e000, 0x38620000, 0x38622000, 0x38624000, 0x38626000,
0x38628000, 0x3862a000, 0x3862c000, 0x3862e000, 0x38630000, 0x38632000,
0x38634000, 0x38636000, 0x38638000, 0x3863a000, 0x3863c000, 0x3863e000,
0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864a000,
0x3864c000, 0x3864e000, 0x38650000, 0x38652000, 0x38654000, 0x38656000,
0x38658000, 0x3865a000, 0x3865c000, 0x3865e000, 0x38660000, 0x38662000,
0x38664000, 0x38666000, 0x38668000, 0x3866a000, 0x3866c000, 0x3866e000,
0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867a000,
0x3867c000, 0x3867e000, 0x38680000, 0x38682000, 0x38684000, 0x38686000,
0x38688000, 0x3868a000, 0x3868c000, 0x3868e000, 0x38690000, 0x38692000,
0x38694000, 0x38696000, 0x38698000, 0x3869a000, 0x3869c000, 0x3869e000,
0x386a0000, 0x386a2000, 0x386a4000, 0x386a6000, 0x386a8000, 0x386aa000,
0x386ac000, 0x386ae000, 0x386b0000, 0x386b2000, 0x386b4000, 0x386b6000,
0x386b8000, 0x386ba000, 0x386bc000, 0x386be000, 0x386c0000, 0x386c2000,
0x386c4000, 0x386c6000, 0x386c8000, 0x386ca000, 0x386cc000, 0x386ce000,
0x386d0000, 0x386d2000, 0x386d4000, 0x386d6000, 0x386d8000, 0x386da000,
0x386dc000, 0x386de000, 0x386e0000, 0x386e2000, 0x386e4000, 0x386e6000,
0x386e8000, 0x386ea000, 0x386ec000, 0x386ee000, 0x386f0000, 0x386f2000,
0x386f4000, 0x386f6000, 0x386f8000, 0x386fa000, 0x386fc000, 0x386fe000,
0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870a000,
0x3870c000, 0x3870e000, 0x38710000, 0x38712000, 0x38714000, 0x38716000,
0x38718000, 0x3871a000, 0x3871c000, 0x3871e000, 0x38720000, 0x38722000,
0x38724000, 0x38726000, 0x38728000, 0x3872a000, 0x3872c000, 0x3872e000,
0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873a000,
0x3873c000, 0x3873e000, 0x38740000, 0x38742000, 0x38744000, 0x38746000,
0x38748000, 0x3874a000, 0x3874c000, 0x3874e000, 0x38750000, 0x38752000,
0x38754000, 0x38756000, 0x38758000, 0x3875a000, 0x3875c000, 0x3875e000,
0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876a000,
0x3876c000, 0x3876e000, 0x38770000, 0x38772000, 0x38774000, 0x38776000,
0x38778000, 0x3877a000, 0x3877c000, 0x3877e000, 0x38780000, 0x38782000,
0x38784000, 0x38786000, 0x38788000, 0x3878a000, 0x3878c000, 0x3878e000,
0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879a000,
0x3879c000, 0x3879e000, 0x387a0000, 0x387a2000, 0x387a4000, 0x387a6000,
0x387a8000, 0x387aa000, 0x387ac000, 0x387ae000, 0x387b0000, 0x387b2000,
0x387b4000, 0x387b6000, 0x387b8000, 0x387ba000, 0x387bc000, 0x387be000,
0x387c0000, 0x387c2000, 0x387c4000, 0x387c6000, 0x387c8000, 0x387ca000,
0x387cc000, 0x387ce000, 0x387d0000, 0x387d2000, 0x387d4000, 0x387d6000,
0x387d8000, 0x387da000, 0x387dc000, 0x387de000, 0x387e0000, 0x387e2000,
0x387e4000, 0x387e6000, 0x387e8000, 0x387ea000, 0x387ec000, 0x387ee000,
0x387f0000, 0x387f2000, 0x387f4000, 0x387f6000, 0x387f8000, 0x387fa000,
0x387fc000, 0x387fe000
};
/* Per-bucket offset into the m__mantissa table, indexed by the half-float
   sign+exponent bits (h >> 10) in m_half2float. Buckets 0 and 32 (the
   positive/negative zero-exponent buckets, whose entries are 0x0000) start
   at the beginning of the table; all other buckets share the block that
   begins at 0x0400. */
static uint16_t m__offset[64] = {
0x0000, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0000, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400
};
/* Per-bucket 32-bit adjustment added to the looked-up mantissa bits in
   m_half2float, indexed by the half-float sign+exponent bits (h >> 10).
   Entries 32..63 carry the float sign bit (0x80000000); entries 31 and 63
   (the half infinity/NaN buckets) map to the float infinity exponent
   (0x47800000 / 0xc7800000). */
static uint32_t m__exponent[64] = {
0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000,
0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000,
0x06000000, 0x06800000, 0x07000000, 0x07800000, 0x08000000, 0x08800000,
0x09000000, 0x09800000, 0x0a000000, 0x0a800000, 0x0b000000, 0x0b800000,
0x0c000000, 0x0c800000, 0x0d000000, 0x0d800000, 0x0e000000, 0x0e800000,
0x0f000000, 0x47800000, 0x80000000, 0x80800000, 0x81000000, 0x81800000,
0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000,
0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000,
0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8a000000, 0x8a800000,
0x8b000000, 0x8b800000, 0x8c000000, 0x8c800000, 0x8d000000, 0x8d800000,
0x8e000000, 0x8e800000, 0x8f000000, 0xc7800000
};
/* Base half-float bit pattern for m_float2half, indexed by the float's
   sign+exponent byte j = (bits >> 23) & 0x1ff. The shifted float mantissa
   is added on top of this base. 0x7c00/0xfc00 runs are the +/- infinity
   saturation buckets; 0x8000 marks the negative-zero/underflow region. */
static uint16_t m__base[512] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040,
0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x0c00, 0x1000, 0x1400, 0x1800, 0x1c00,
0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00, 0x4000, 0x4400,
0x4800, 0x4c00, 0x5000, 0x5400, 0x5800, 0x5c00, 0x6000, 0x6400, 0x6800, 0x6c00,
0x7000, 0x7400, 0x7800, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8001,
0x8002, 0x8004, 0x8008, 0x8010, 0x8020, 0x8040, 0x8080, 0x8100, 0x8200, 0x8400,
0x8800, 0x8c00, 0x9000, 0x9400, 0x9800, 0x9c00, 0xa000, 0xa400, 0xa800, 0xac00,
0xb000, 0xb400, 0xb800, 0xbc00, 0xc000, 0xc400, 0xc800, 0xcc00, 0xd000, 0xd400,
0xd800, 0xdc00, 0xe000, 0xe400, 0xe800, 0xec00, 0xf000, 0xf400, 0xf800, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00
};
/* Right-shift applied to the float mantissa in m_float2half, indexed by
   the same sign+exponent byte as m__base. 0x18 (= 24) shifts the mantissa
   away entirely (value collapses to the base pattern); 0x17..0x0e cover
   the subnormal ramp; 0x0d (= 13) keeps the top 10 mantissa bits for
   normalized values. */
static uint8_t m__shift[512] = {
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x17,
0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x0d, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13,
0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x0d
};
/* Convert raw IEEE half-precision bits to a 32-bit float using the
   precomputed mantissa/offset/exponent lookup tables. */
MIAPI float m_half2float(uint16_t h)
{
    union {
        uint32_t bits;
        float value;
    } result;
    int bucket = h >> 10; /* sign + exponent bits select the table bucket */
    result.bits = m__mantissa[(h & 0x3ff) + m__offset[bucket]] + m__exponent[bucket];
    return result.value;
}
/* Convert a 32-bit float to raw IEEE half-precision bits using the
   precomputed base/shift lookup tables. */
MIAPI uint16_t m_float2half(float flt)
{
    union {
        uint32_t bits;
        float value;
    } in;
    uint32_t word, bucket;
    in.value = flt;
    word = in.bits;
    bucket = (word >> 23) & 0x1ff; /* sign + exponent byte */
    return (uint16_t)((uint32_t)m__base[bucket] + ((word & 0x007fffff) >> m__shift[bucket]));
}
/* Return the size in bytes of one element of the given pixel type.
   Asserts (and returns 0 in release builds) on an unknown type.
   Note: the original had unreachable `break` statements after each
   `return`; they have been removed. */
MIAPI int m_type_sizeof(char type)
{
    switch (type) {
    case M_BYTE:
    case M_UBYTE:
        return sizeof(uint8_t);
    case M_SHORT:
    case M_USHORT:
    case M_HALF:
        return sizeof(uint16_t);
    case M_BOOL:
    case M_INT:
    case M_UINT:
        return sizeof(uint32_t);
    case M_FLOAT:
        return sizeof(float);
    case M_DOUBLE:
        return sizeof(double);
    default:
        assert(0);
        return 0;
    }
}
/* Allocate (or reuse) storage for a width x height x comp image of the
   given element type. If the image already holds a buffer with identical
   geometry and type, it is kept as-is. On allocation failure a message is
   printed and image->data is left NULL (best-effort, matching the
   library's original behavior); the header fields are still updated. */
MIAPI void m_image_create(struct m_image *image, char type, int width, int height, int comp)
{
    int size = width * height * comp;
    assert(size > 0);

    /* already allocated with identical geometry: keep the buffer */
    if (image->data != 0 && type == image->type && width == image->width && height == image->height && comp == image->comp)
        return;

    M_SAFE_FREE(image->data);
    /* promote to size_t before multiplying to avoid int overflow on large images */
    image->data = malloc((size_t)size * (size_t)m_type_sizeof(type));
    if (!image->data)
        printf("BAD ALLOC:m_image_create\n");

    image->type = type;
    image->width = width;
    image->height = height;
    image->comp = comp;
    image->size = size;
}
/* Free the image's pixel buffer (if any) and zero out all fields,
   returning the struct to its identity state. */
MIAPI void m_image_destroy(struct m_image *image)
{
M_SAFE_FREE(image->data);
memset(image, 0, sizeof(struct m_image));
}
/* Copy src into dest, (re)allocating dest as needed.
   The element size is derived from m_type_sizeof instead of a duplicated
   per-type switch; this also fixes the original omission of M_BOOL and
   M_DOUBLE, which previously hit assert(0) despite being supported by
   m_type_sizeof and m_image_create. */
MIAPI void m_image_copy(struct m_image *dest, const struct m_image *src)
{
    m_image_create(dest, src->type, src->width, src->height, src->comp);
    memcpy(dest->data, src->data, (size_t)dest->size * (size_t)m_type_sizeof(dest->type));
}
MIAPI void m_image_copy_sub_image(struct m_image *dest, const struct m_image *src, int x, int y, int w, int h)
{
#define M_COPY_SUBI(T)\
{\
T *sData = (T *)src->data + (miny * src->width + minx) * comp;\
T *dData = (T *)dest->data;\
int y;\
for (y = miny; y <= maxy; y++) {\
memcpy(dData, sData, dstep * sizeof(T));\
dData += dstep;\
sData += sstep;\
}\
}
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_copy_sub_image(dest, &tmp, x, y, w, h);
m_image_destroy(&tmp);
}
else {
int comp = src->comp;
int minx = M_MAX(0, x);
int miny = M_MAX(0, y);
int maxx = M_CLAMP(x + w - 1, 0, src->width - 1);
int maxy = M_CLAMP(y + h - 1, 0, src->height - 1);
int dwidth = 1 + maxx - minx;
int dheight = 1 + maxy - miny;
int sstep = src->width * comp;
int dstep = dwidth * comp;
m_image_create(dest, src->type, dwidth, dheight, src->comp);
switch(src->type)
{
case M_BYTE:
case M_UBYTE:
M_COPY_SUBI(uint8_t);
break;
case M_SHORT:
case M_USHORT:
case M_HALF:
M_COPY_SUBI(uint16_t);
break;
case M_INT:
case M_UINT:
M_COPY_SUBI(uint32_t);
break;
case M_FLOAT:
M_COPY_SUBI(float);
break;
default:
assert(0);
break;
}
}
#undef M_COPY_SUBI
}
/* Convert an 8-bit image to float, mapping [0, 255] to [0, 1].
   dest == src is handled via a temporary copy. */
MIAPI void m_image_ubyte_to_float(struct m_image *dest, const struct m_image *src)
{
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_ubyte_to_float(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        const float scale = 1.0f / 255.0f;
        uint8_t *in;
        float *out;
        int i;
        m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
        in = (uint8_t *)src->data;
        out = (float *)dest->data;
        for (i = 0; i < src->size; i++)
            out[i] = (float)in[i] * scale;
    }
}
/* Convert a 16-bit image to float, mapping [0, 65535] to [0, 1].
   dest == src is handled via a temporary copy. */
MIAPI void m_image_ushort_to_float(struct m_image *dest, const struct m_image *src)
{
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_ushort_to_float(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        const float scale = 1.0f / (float)65535;
        uint16_t *in;
        float *out;
        int i;
        m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
        in = (uint16_t *)src->data;
        out = (float *)dest->data;
        for (i = 0; i < src->size; i++)
            out[i] = (float)in[i] * scale;
    }
}
/* Convert a half-float image to 32-bit float, one element at a time via
   m_half2float. dest == src is handled via a temporary copy. */
MIAPI void m_image_half_to_float(struct m_image *dest, const struct m_image *src)
{
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_half_to_float(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        uint16_t *in;
        float *out;
        int i;
        m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
        in = (uint16_t *)src->data;
        out = (float *)dest->data;
        for (i = 0; i < src->size; i++)
            out[i] = m_half2float(in[i]);
    }
}
/* Convert a float image to 8-bit, mapping [0, 1] to [0, 255] with
   round-to-nearest and clamping. dest == src is handled via a copy. */
MIAPI void m_image_float_to_ubyte(struct m_image *dest, const struct m_image *src)
{
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_float_to_ubyte(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        float *in;
        uint8_t *out;
        int i;
        m_image_create(dest, M_UBYTE, src->width, src->height, src->comp);
        in = (float *)src->data;
        out = (uint8_t *)dest->data;
        for (i = 0; i < src->size; i++) {
            /* scale, round to nearest, clamp into the ubyte range */
            int v = (int)(in[i] * 255.0f + 0.5f);
            out[i] = (uint8_t)M_CLAMP(v, 0, 255);
        }
    }
}
/* Convert a float image to 16-bit, mapping [0, 1] to [0, 65535] with
   clamping. Now rounds to nearest (+ 0.5f) instead of truncating, for
   consistency with m_image_float_to_ubyte. */
MIAPI void m_image_float_to_ushort(struct m_image *dest, const struct m_image *src)
{
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_float_to_ushort(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        float *src_data;
        uint16_t *dest_data;
        int i;
        m_image_create(dest, M_USHORT, src->width, src->height, src->comp);
        src_data = (float *)src->data;
        dest_data = (uint16_t *)dest->data;
        for (i = 0; i < src->size; i++) {
            /* round to nearest, matching the ubyte conversion above */
            int x = (int)(src_data[i] * 65535.0f + 0.5f);
            dest_data[i] = (uint16_t)M_CLAMP(x, 0, 65535);
        }
    }
}
/* Convert a 32-bit float image to half-float, one element at a time via
   m_float2half. dest == src is handled via a temporary copy. */
MIAPI void m_image_float_to_half(struct m_image *dest, const struct m_image *src)
{
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_float_to_half(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        float *in;
        uint16_t *out;
        int i;
        m_image_create(dest, M_HALF, src->width, src->height, src->comp);
        in = (float *)src->data;
        out = (uint16_t *)dest->data;
        for (i = 0; i < src->size; i++)
            out[i] = m_float2half(in[i]);
    }
}
MIAPI void m_image_extract_component(struct m_image *dest, const struct m_image *src, int c)
{
#define M_EXTRACT(T)\
{\
T *dest_pixel = (T *)dest->data;\
T *src_pixel = (T *)src->data;\
for (i = 0; i < size; i += comp) {\
(*dest_pixel) = src_pixel[c];\
dest_pixel++;\
src_pixel += comp;\
}\
}
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_extract_component(dest, &tmp, c);
m_image_destroy(&tmp);
}
else {
int width = src->width;
int height = src->height;
int comp = src->comp;
int size = src->size;
int i;
if(c >= src->comp) {
assert(0);
return;
}
m_image_create(dest, src->type, width, height, 1);
switch(src->type)
{
case M_BYTE:
case M_UBYTE:
M_EXTRACT(uint8_t);
break;
case M_SHORT:
case M_USHORT:
case M_HALF:
M_EXTRACT(uint16_t);
break;
case M_INT:
case M_UINT:
M_EXTRACT(uint32_t);
break;
case M_FLOAT:
M_EXTRACT(float);
break;
default:
assert(0);
break;
}
}
#undef M_EXTRACT
}
MIAPI void m_image_reframe_zero(struct m_image *dest, const struct m_image *src, int left, int top, int right, int bottom)
{
#define M_REFRAME(T)\
{\
T *src_data;\
T *src_pixel;\
T *dest_pixel;\
int c;\
int x, y;\
m_image_create(dest, src->type, width2, height2, comp);\
src_data = (T *)src->data;\
dest_pixel = (T *)dest->data;\
for (y = 0; y < height2; y++) {\
int ys = y - top;\
for (x = 0; x < width2; x++) {\
int xs = x - left;\
if (ys >= 0 && ys < height && xs >= 0 && xs < width) {\
src_pixel = src_data + (ys * width + xs) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
}\
else {\
for (c = 0; c < comp; c++)\
dest_pixel[c] = 0;\
}\
dest_pixel += comp;\
}\
}\
}
if(left != 0 || top != 0 || right != 0 || bottom != 0) {
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_reframe_zero(dest, &tmp, left, top, right, bottom);
m_image_destroy(&tmp);
}
else {
int comp = src->comp;
int width = src->width;
int height = src->height;
int width2 = width + left + right;
int height2 = height + top + bottom;
if(width2 > 0 && height2 > 0) {
switch(src->type) {
case M_BYTE:
case M_UBYTE:
M_REFRAME(uint8_t);
break;
case M_SHORT:
case M_USHORT:
case M_HALF:
M_REFRAME(uint16_t);
break;
case M_INT:
case M_UINT:
M_REFRAME(uint32_t);
break;
case M_FLOAT:
M_REFRAME(float);
break;
default:
assert(0);
break;
}
}
else {
assert(0);
}
}
}
else {
m_image_copy(dest, src);
}
#undef M_REFRAME
}
MIAPI void m_image_reframe(struct m_image *dest, const struct m_image *src, int left, int top, int right, int bottom)
{
#define M_REFRAME(T)\
{\
T *src_data;\
T *src_pixel;\
T *dest_pixel;\
int c;\
int x, y;\
m_image_create(dest, src->type, width2, height2, comp);\
src_data = (T *)src->data;\
dest_pixel = (T *)dest->data;\
for (y = 0; y < height2; y++) {\
T *src_y;\
int ys = y - top;\
src_y = src_data + M_CLAMP(ys, 0, hm1) * width * comp;\
for (x = 0; x < width2; x++) {\
int xs = x - left;\
src_pixel = src_y + M_CLAMP(xs, 0, wm1) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}\
}
if(left != 0 || top != 0 || right != 0 || bottom != 0) {
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_reframe(dest, &tmp, left, top, right, bottom);
m_image_destroy(&tmp);
}
else {
int comp = src->comp;
int width = src->width;
int height = src->height;
int width2 = width + left + right;
int height2 = height + top + bottom;
int wm1 = width - 1;
int hm1 = height - 1;
if(width2 > 0 && height2 > 0) {
switch(src->type) {
case M_BYTE:
case M_UBYTE:
M_REFRAME(uint8_t);
break;
case M_SHORT:
case M_USHORT:
case M_HALF:
M_REFRAME(uint16_t);
break;
case M_INT:
case M_UINT:
M_REFRAME(uint32_t);
break;
case M_FLOAT:
M_REFRAME(float);
break;
default:
assert(0);
break;
}
}
else {
assert(0);
}
}
}
else {
m_image_copy(dest, src);
}
#undef M_REFRAME
}
MIAPI void m_image_rotate_left(struct m_image *dest, const struct m_image *src)
{
#define M_ROTATE_L(T)\
{\
T *src_data = (T *)src->data;\
T *dest_pixel = (T *)dest->data;\
for (y = 0; y < width; y++)\
for (x = 0; x < height; x++) {\
T *src_pixel = src_data + (x * width + (width - 1 - y)) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_rotate_left(dest, &tmp);
m_image_destroy(&tmp);
}
else {
int width = src->width;
int height = src->height;
int comp = src->comp;
int x, y, c;
m_image_create(dest, src->type, height, width, comp);
switch(src->type)
{
case M_BYTE:
case M_UBYTE:
M_ROTATE_L(uint8_t);
break;
case M_SHORT:
case M_USHORT:
case M_HALF:
M_ROTATE_L(uint16_t);
break;
case M_INT:
case M_UINT:
M_ROTATE_L(uint32_t);
break;
case M_FLOAT:
M_ROTATE_L(float);
break;
default:
assert(0);
break;
}
}
#undef M_ROTATE_L
}
/* Rotate src 90 degrees clockwise into dest.
 * The destination image has swapped dimensions (height x width);
 * dest may alias src (handled via a temporary copy). */
MIAPI void m_image_rotate_right(struct m_image *dest, const struct m_image *src)
{
/* copies every destination pixel from src at (height - 1 - x, y),
   i.e. a 90-degree clockwise mapping */
#define M_ROTATE_R(T)\
{\
T *src_data = (T *)src->data;\
T *dest_pixel = (T *)dest->data;\
for (y = 0; y < width; y++)\
for (x = 0; x < height; x++) {\
T *src_pixel = src_data + ((height - 1 - x) * width + y) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}
if (dest == src) {
/* in-place call: rotate a temporary copy instead */
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_rotate_right(dest, &tmp);
m_image_destroy(&tmp);
}
else {
int width = src->width;
int height = src->height;
int comp = src->comp;
int x, y, c;
/* note: width and height are swapped in the rotated image */
m_image_create(dest, src->type, height, width, comp);
switch(src->type)
{
case M_BYTE:
case M_UBYTE:
M_ROTATE_R(uint8_t);
break;
case M_SHORT:
case M_USHORT:
case M_HALF:
M_ROTATE_R(uint16_t);
break;
case M_INT:
case M_UINT:
M_ROTATE_R(uint32_t);
break;
case M_FLOAT:
M_ROTATE_R(float);
break;
default:
assert(0); /* unsupported pixel type */
break;
}
}
#undef M_ROTATE_R
}
MIAPI void m_image_rotate_180(struct m_image *dest, const struct m_image *src)
{
/* Rotate src by 180 degrees into dest (same dimensions): walk the source
   forward while filling the destination backward. dest may alias src. */
#define M_ROTATE_180(T)\
{\
	T *in_pixel = (T *)src->data;\
	T *out = (T *)dest->data + width * height * comp;\
	for (y = 0; y < height; y++)\
	for (x = 0; x < width; x++) {\
		out -= comp;\
		for (c = 0; c < comp; c++)\
			out[c] = in_pixel[c];\
		in_pixel += comp;\
	}\
}
	if (dest == src) {
		/* in-place call: rotate a temporary copy instead */
		struct m_image tmp = M_IMAGE_IDENTITY();
		m_image_copy(&tmp, src);
		m_image_rotate_180(dest, &tmp);
		m_image_destroy(&tmp);
	}
	else {
		int width = src->width;
		int height = src->height;
		int comp = src->comp;
		int x, y, c;
		m_image_create(dest, src->type, width, height, comp);
		switch (src->type) {
		case M_BYTE:
		case M_UBYTE:
			M_ROTATE_180(uint8_t);
			break;
		case M_SHORT:
		case M_USHORT:
		case M_HALF:
			M_ROTATE_180(uint16_t);
			break;
		case M_INT:
		case M_UINT:
			M_ROTATE_180(uint32_t);
			break;
		case M_FLOAT:
			M_ROTATE_180(float);
			break;
		default:
			assert(0); /* unsupported pixel type */
			break;
		}
	}
#undef M_ROTATE_180
}
/* Horizontal flip: dest(x, y) = src(width - 1 - x, y).
 * dest may alias src (handled via a temporary copy). */
MIAPI void m_image_mirror_x(struct m_image *dest, const struct m_image *src)
{
/* per-pixel copy, reading each row of src right-to-left */
#define M_MIRROR_X(T)\
{\
T *src_data = (T *)src->data;\
T *dest_pixel = (T *)dest->data;\
for (y = 0; y < height; y++)\
for (x = 0; x < width; x++) {\
T *src_pixel = src_data + (y * width + (width - 1 - x)) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}
if (dest == src) {
/* in-place call: mirror a temporary copy instead */
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_mirror_x(dest, &tmp);
m_image_destroy(&tmp);
}
else {
int width = src->width;
int height = src->height;
int comp = src->comp;
int x, y, c;
m_image_create(dest, src->type, width, height, comp);
switch(src->type)
{
case M_BYTE:
case M_UBYTE:
M_MIRROR_X(uint8_t);
break;
case M_SHORT:
case M_USHORT:
case M_HALF:
M_MIRROR_X(uint16_t);
break;
case M_INT:
case M_UINT:
M_MIRROR_X(uint32_t);
break;
case M_FLOAT:
M_MIRROR_X(float);
break;
default:
assert(0); /* unsupported pixel type */
break;
}
}
#undef M_MIRROR_X
}
MIAPI void m_image_mirror_y(struct m_image *dest, const struct m_image *src)
{
/* Vertical flip: dest row y = src row (height - 1 - y).
   Rows are contiguous, so each one is copied in a single memcpy. */
#define M_MIRROR_Y(T)\
{\
	T *in = (T *)src->data;\
	T *out = (T *)dest->data;\
	int row_len = width * comp;\
	for (y = 0; y < height; y++)\
		memcpy(out + y * row_len, in + (height - 1 - y) * row_len, row_len * sizeof(T));\
}
	if (dest == src) {
		/* in-place call: mirror a temporary copy instead */
		struct m_image tmp = M_IMAGE_IDENTITY();
		m_image_copy(&tmp, src);
		m_image_mirror_y(dest, &tmp);
		m_image_destroy(&tmp);
	}
	else {
		int width = src->width;
		int height = src->height;
		int comp = src->comp;
		int y;
		m_image_create(dest, src->type, width, height, comp);
		switch (src->type) {
		case M_BYTE:
		case M_UBYTE:
			M_MIRROR_Y(uint8_t);
			break;
		case M_SHORT:
		case M_USHORT:
		case M_HALF:
			M_MIRROR_Y(uint16_t);
			break;
		case M_INT:
		case M_UINT:
			M_MIRROR_Y(uint32_t);
			break;
		case M_FLOAT:
			M_MIRROR_Y(float);
			break;
		default:
			assert(0); /* unsupported pixel type */
			break;
		}
	}
#undef M_MIRROR_Y
}
MIAPI void m_image_premultiply(struct m_image *dest, const struct m_image *src)
{
	/* Alpha-premultiply an RGBA float image: RGB is multiplied by alpha,
	   the alpha channel is kept as is. Safe when dest aliases src. */
	float *out, *in;
	int i;
	assert(src->size > 0 && src->type == M_FLOAT && src->comp == 4);
	m_image_create(dest, M_FLOAT, src->width, src->height, 4);
	out = (float *)dest->data;
	in = (float *)src->data;
	for (i = 0; i < src->size; i += 4, in += 4, out += 4) {
		float a = in[3];
		out[0] = in[0] * a;
		out[1] = in[1] * a;
		out[2] = in[2] * a;
		out[3] = a;
	}
}
/* Undo alpha premultiplication of an RGBA float image:
 * RGB is divided by alpha; pixels with alpha <= 0 get RGB = 0.
 * dest may alias src (conversion is per-pixel, reads before writes). */
MIAPI void m_image_unpremultiply(struct m_image *dest, const struct m_image *src)
{
float *dest_p, *src_p;
int i;
assert(src->size > 0 && src->type == M_FLOAT && src->comp == 4);
m_image_create(dest, M_FLOAT, src->width, src->height, 4);
dest_p = (float *)dest->data;
src_p = (float *)src->data;
for (i = 0; i < src->size; i+=4) {
if (src_p[3] > 0.0f) {
float x = 1.0f / src_p[3]; /* divide once, multiply per channel */
dest_p[0] = src_p[0] * x;
dest_p[1] = src_p[1] * x;
dest_p[2] = src_p[2] * x;
}
else {
/* fully transparent pixel: color is undefined, clear it */
dest_p[0] = 0;
dest_p[1] = 0;
dest_p[2] = 0;
}
dest_p[3] = src_p[3];
dest_p += 4;
src_p += 4;
}
}
/* Convert an sRGB-encoded float image to linear light.
 * Only the first 3 components are converted; any extra components
 * (e.g. alpha) are copied unchanged. Works in place when dest == src
 * (extra components are then already in place, so only conversion runs). */
MIAPI void m_image_sRGB_to_linear(struct m_image *dest, const struct m_image *src)
{
float *dest_p, *src_p;
int i, c, comp3 = M_MIN(src->comp, 3);
assert(src->size > 0 && src->type == M_FLOAT);
m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
dest_p = (float *)dest->data;
src_p = (float *)src->data;
if (dest == src) {
#pragma omp parallel for schedule(dynamic, 8)
for (i = 0; i < src->size; i+=src->comp) {
m_sRGB_to_linear(dest_p+i, src_p+i, comp3);
}
}
else {
#pragma omp parallel for schedule(dynamic, 8)
for (i = 0; i < src->size; i+=src->comp) {
m_sRGB_to_linear(dest_p+i, src_p+i, comp3);
/* pass through the non-color components */
for (c = comp3; c < src->comp; c++)
dest_p[i+c] = src_p[i+c];
}
}
}
/* Convert a linear-light float image to sRGB encoding.
 * Only the first 3 components are converted; any extra components
 * (e.g. alpha) are copied unchanged. Works in place when dest == src. */
MIAPI void m_image_linear_to_sRGB(struct m_image *dest, const struct m_image *src)
{
float *dest_p, *src_p;
int i, c, comp3 = M_MIN(src->comp, 3);
assert(src->size > 0 && src->type == M_FLOAT);
m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
dest_p = (float *)dest->data;
src_p = (float *)src->data;
if (dest == src) {
#pragma omp parallel for schedule(dynamic, 8)
for (i = 0; i < src->size; i+=src->comp) {
m_linear_to_sRGB(dest_p+i, src_p+i, comp3);
}
}
else {
#pragma omp parallel for schedule(dynamic, 8)
for (i = 0; i < src->size; i+=src->comp) {
m_linear_to_sRGB(dest_p+i, src_p+i, comp3);
/* pass through the non-color components */
for (c = comp3; c < src->comp; c++)
dest_p[i+c] = src_p[i+c];
}
}
}
MIAPI void m_image_summed_area(struct m_image *dest, const struct m_image *src)
{
	/* Summed-area table (integral image), computed per component: after the
	   call each pixel holds the inclusive sum of all pixels above and to
	   its left. Works in place when dest == src. */
	float *data;
	int width = src->width;
	int height = src->height;
	int comp = src->comp;
	int x, y, c;
	assert(src->size > 0 && src->type == M_FLOAT);
	if (dest != src)
		m_image_copy(dest, src);
	data = (float *)dest->data;
	/* horizontal prefix sums, row by row */
	for (y = 0; y < height; y++) {
		float *row = data + y * width * comp;
		for (x = 1; x < width; x++)
			for (c = 0; c < comp; c++)
				row[x * comp + c] += row[(x - 1) * comp + c];
	}
	/* vertical prefix sums: add the (already accumulated) row above */
	for (y = 1; y < height; y++) {
		float *row = data + y * width * comp;
		float *above = row - width * comp;
		for (x = 0; x < width * comp; x++)
			row[x] += above[x];
	}
}
/* Raw horizontal 1D convolution with no border handling.
 * dest is created (size - 1) pixels narrower than src: each output pixel is
 * the kernel-weighted sum of 'size' consecutive source pixels on its row.
 * 'size' is expected to be odd (radius = (size - 1) / 2). */
MIAPI void m_image_convolution_h_raw(struct m_image *dest, const struct m_image *src, float *kernel, int size)
{
float *src_data;
float *dest_data;
int radius = (size - 1) / 2;
int width = src->width - radius * 2; /* valid region only */
int height = src->height;
int comp = src->comp;
int y, ystep, ystepc;
assert(src->size > 0 && src->type == M_FLOAT);
/* create destination images */
m_image_create(dest, M_FLOAT, width, height, comp);
/* clear: the loop below accumulates into dest */
memset(dest->data, 0, dest->size * sizeof(float));
src_data = (float *)src->data;
dest_data = (float *)dest->data;
ystep = width * comp;
ystepc = src->width * comp; /* source row stride differs from dest */
#pragma omp parallel for schedule(dynamic, 8)
for (y = 0; y < height; y++) {
float *dest_pixel = dest_data + y * ystep;
float *src_pixel_y = src_data + y * ystepc;
int x;
for (x = 0; x < width; x++) {
float *src_pixel;
int i, k;
src_pixel = src_pixel_y + (x * comp);
/* apply kernel */
for (k = 0; k < size; k++) {
float v = kernel[k];
for (i = 0; i < comp; i++)
dest_pixel[i] += (*src_pixel++) * v;
}
dest_pixel += comp;
}
}
}
/* Raw vertical 1D convolution with no border handling.
 * dest is created (size - 1) pixels shorter than src: each output pixel is
 * the kernel-weighted sum of 'size' consecutive source pixels in its column.
 * 'size' is expected to be odd (radius = (size - 1) / 2). */
MIAPI void m_image_convolution_v_raw(struct m_image *dest, const struct m_image *src, float *kernel, int size)
{
float *src_data;
float *dest_data;
int radius = (size - 1) / 2;
int width = src->width;
int height = src->height - radius * 2; /* valid region only */
int comp = src->comp;
int y, ystep;
assert(src->size > 0 && src->type == M_FLOAT);
/* create destination images */
m_image_create(dest, M_FLOAT, width, height, comp);
/* clear: the loop below accumulates into dest */
memset(dest->data, 0, dest->size * sizeof(float));
src_data = (float *)src->data;
dest_data = (float *)dest->data;
ystep = width * comp; /* row stride, shared by src and dest (same width) */
#pragma omp parallel for schedule(dynamic, 8)
for (y = 0; y < height; y++) {
float *dest_pixel = dest_data + y * ystep;
int x;
for (x = 0; x < width; x++) {
float *src_pixel;
int i, k;
src_pixel = src_data + (y * width + x) * comp;
/* apply kernel, stepping one row down per tap */
for (k = 0; k < size; k++) {
float v = kernel[k];
for (i = 0; i < comp; i++)
dest_pixel[i] += src_pixel[i] * v;
src_pixel += ystep;
}
dest_pixel += comp;
}
}
}
/* Horizontal 1D convolution with border compensation.
 * The source is padded with a zero-filled margin of 'radius' pixels, the raw
 * convolution is applied, then the result is re-normalized so border pixels
 * are not darkened by the padding. Output size equals the source size. */
MIAPI void m_image_convolution_h(struct m_image *dest, const struct m_image *src, float *kernel, int size)
{
struct m_image mask = M_IMAGE_IDENTITY();
struct m_image tmp = M_IMAGE_IDENTITY();
float *destp;
int radius = (size - 1) / 2;
int x, y, c;
assert(src->size > 0 && src->type == M_FLOAT);
/* pad with a zero margin, then run the raw (valid-region) convolution */
m_image_reframe_zero(&tmp, src, radius, 0, radius, 0);
m_image_convolution_h_raw(dest, &tmp, kernel, size);
/* build the normalization mask: convolve a 1-row image that is 1 inside
   the original width and 0 in the margins (tmp is reused here) */
m_image_create(&tmp, M_FLOAT, src->width + radius * 2, 1, 1);
for (x = 0; x < radius; x++)
((float *)tmp.data)[x] = 0;
for (; x < (tmp.width - radius); x++)
((float *)tmp.data)[x] = 1;
for (; x < tmp.width; x++)
((float *)tmp.data)[x] = 0;
m_image_convolution_h_raw(&mask, &tmp, kernel, size);
/* invert the mask, then scale each output column by it */
for (x = 0; x < mask.width; x++)
((float *)mask.data)[x] = 1.0f / ((float *)mask.data)[x];
destp = (float *)dest->data;
for (y = 0; y < dest->height; y++) {
for (x = 0; x < dest->width; x++) {
for (c = 0; c < dest->comp; c++)
destp[c] *= ((float *)mask.data)[x];
destp += dest->comp;
}
}
m_image_destroy(&mask);
m_image_destroy(&tmp);
}
/* Vertical 1D convolution with border compensation.
 * The source is padded with a zero-filled margin of 'radius' pixels, the raw
 * convolution is applied, then the result is re-normalized so border pixels
 * are not darkened by the padding. Output size equals the source size. */
MIAPI void m_image_convolution_v(struct m_image *dest, const struct m_image *src, float *kernel, int size)
{
struct m_image tmp = M_IMAGE_IDENTITY();
struct m_image mask = M_IMAGE_IDENTITY();
float *destp;
int radius = (size - 1) / 2;
int x, y, c;
assert(src->size > 0 && src->type == M_FLOAT);
/* pad with a zero margin, then run the raw (valid-region) convolution */
m_image_reframe_zero(&tmp, src, 0, radius, 0, radius);
m_image_convolution_v_raw(dest, &tmp, kernel, size);
/* build the normalization mask: convolve a 1-column image that is 1 inside
   the original height and 0 in the margins (tmp is reused here) */
m_image_create(&tmp, M_FLOAT, 1, src->height + radius * 2, 1);
for (y = 0; y < radius; y++)
((float *)tmp.data)[y] = 0;
for (; y < (tmp.height - radius); y++)
((float *)tmp.data)[y] = 1;
for (; y < tmp.height; y++)
((float *)tmp.data)[y] = 0;
m_image_convolution_v_raw(&mask, &tmp, kernel, size);
/* scale each output row by the inverted mask value */
destp = (float *)dest->data;
for (y = 0; y < dest->height; y++) {
float idiv = 1.0f / ((float *)mask.data)[y];
for (x = 0; x < dest->width; x++) {
for (c = 0; c < dest->comp; c++)
destp[c] *= idiv;
destp += dest->comp;
}
}
m_image_destroy(&mask);
m_image_destroy(&tmp);
}
/* Separable Gaussian blur of a float image.
 * dx, dy: blur amounts for each axis; an axis with a non-positive amount is
 * left untouched, and when both are ~0 the image is simply copied.
 * dest may alias src. */
MIAPI void m_image_gaussian_blur(struct m_image *dest, const struct m_image *src, float dx, float dy)
{
	struct m_image tmp = M_IMAGE_IDENTITY();
	float *kernelx = NULL, *kernely = NULL;
	/* kernel sizes are always odd so the filter stays centered */
	int kernelx_size = (int)(dx / 0.65f + 0.5f) * 2 + 1;
	int kernely_size = (int)(dy / 0.65f + 0.5f) * 2 + 1;
	assert(src->size > 0 && src->type == M_FLOAT);
	/* nothing to blur: plain copy */
	if (dx < FLT_EPSILON && dy < FLT_EPSILON) {
		if (dest != src) m_image_copy(dest, src);
		return;
	}
	/* horizontal pass (into tmp when a vertical pass follows) */
	if (dx > 0) {
		kernelx = (float *)malloc(kernelx_size * sizeof(float));
		m_gaussian_kernel(kernelx, kernelx_size, dx);
		if (dy > 0)
			m_image_convolution_h(&tmp, src, kernelx, kernelx_size);
		else
			m_image_convolution_h(dest, src, kernelx, kernelx_size);
	}
	/* vertical pass */
	if (dy > 0) {
		kernely = (float *)malloc(kernely_size * sizeof(float));
		m_gaussian_kernel(kernely, kernely_size, dy);
		if (dx > 0)
			m_image_convolution_v(dest, &tmp, kernely, kernely_size);
		else
			m_image_convolution_v(dest, src, kernely, kernely_size);
	}
	m_image_destroy(&tmp);
	/* free(NULL) is a no-op: the previous "if (ptr) free(ptr)" guards were redundant */
	free(kernely);
	free(kernelx);
}
/* Convert a float image with at least 3 components to a single-component
 * greyscale image. Channels beyond the first three are ignored.
 * Weights are 0.3 / 0.5 / 0.2 (NOTE(review): not the standard Rec.601 luma
 * coefficients 0.299/0.587/0.114 — presumably an intentional approximation;
 * confirm before "fixing"). dest may alias src (handled via a copy). */
MIAPI void m_image_grey(struct m_image *dest, const struct m_image *src)
{
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_grey(dest, &tmp);
m_image_destroy(&tmp);
}
else {
float *src_pixel;
float *dest_pixel;
int size = src->size;
int i, c = src->comp;
assert(src->size > 0 && src->type == M_FLOAT && src->comp > 2);
m_image_create(dest, M_FLOAT, src->width, src->height, 1);
src_pixel = (float *)src->data;
dest_pixel = (float *)dest->data;
for (i = 0; i < size; i+=c) {
float v = src_pixel[0] * 0.3f + src_pixel[1] * 0.5f + src_pixel[2] * 0.2f;
*dest_pixel = v;
dest_pixel++;
src_pixel+=c;
}
}
}
MIAPI void m_image_max(struct m_image *dest, const struct m_image *src)
{
	/* Per-pixel maximum across channels, producing a 1-component float
	   image. dest may alias src (handled via a temporary copy). */
	if (dest == src) {
		struct m_image tmp = M_IMAGE_IDENTITY();
		m_image_copy(&tmp, src);
		m_image_max(dest, &tmp);
		m_image_destroy(&tmp);
		return;
	}
	{
		float *in, *out;
		int count = src->size;
		int comp = src->comp;
		int i, j;
		assert(src->size > 0 && src->type == M_FLOAT);
		m_image_create(dest, M_FLOAT, src->width, src->height, 1);
		in = (float *)src->data;
		out = (float *)dest->data;
		for (i = 0; i < count; i += comp) {
			float best = in[0];
			for (j = 1; j < comp; j++)
				if (in[j] > best)
					best = in[j];
			*out++ = best;
			in += comp;
		}
	}
}
/* Per-pixel maximum of absolute channel values, producing a 1-component
 * float image. dest may alias src (handled via a temporary copy). */
MIAPI void m_image_max_abs(struct m_image *dest, const struct m_image *src)
{
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m_image_max_abs(dest, &tmp);
m_image_destroy(&tmp);
}
else {
float *src_pixel;
float *dest_pixel;
int size = src->size;
int i, j, c = src->comp;
assert(src->size > 0 && src->type == M_FLOAT);
m_image_create(dest, M_FLOAT, src->width, src->height, 1);
src_pixel = (float *)src->data;
dest_pixel = (float *)dest->data;
for (i = 0; i < size; i+=c) {
float v = fabsf(src_pixel[0]);
for (j = 1; j < c; j++)
v = M_MAX(v, fabsf(src_pixel[j]));
*dest_pixel = v;
dest_pixel++;
src_pixel+=c;
}
}
}
/* 3x3 dot product: 'data' points at the top-left tap of a 3x3 window inside
   an image whose row stride is 'width' floats; 'kernel' is 9 row-major
   coefficients. Returns the weighted sum. */
static float m__convolve_pixel(float *data, int width, float *kernel)
{
	float acc = 0;
	int row, col;
	for (row = 0; row < 3; row++) {
		float *p = data + row * width;
		for (col = 0; col < 3; col++)
			acc += p[col] * kernel[row * 3 + col];
	}
	return acc;
}
/* Sobel gradient filter: dest becomes a 2-component float image holding the
 * (gx, gy) gradient of each pixel of the 1-component float src.
 * Fix: restore "&copy" at the reframe/destroy call sites — it had been
 * mangled into the HTML entity character '©' by an encoding pass. */
MIAPI void m_image_sobel(struct m_image *dest, const struct m_image *src)
{
	struct m_image copy = M_IMAGE_IDENTITY();
	float ky[9] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
	float kx[9] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
	int width = src->width;
	int height = src->height;
	int w2 = width + 2; /* row stride of the padded copy */
	int y;
	assert(src->size > 0 && src->type == M_FLOAT && src->comp == 1);
	/* 1-pixel clamped margin so the 3x3 window never reads out of bounds */
	m_image_reframe(&copy, src, 1, 1, 1, 1);
	m_image_create(dest, M_FLOAT, width, height, 2);
#pragma omp parallel for schedule(dynamic, 8)
	for (y = 0; y < height; y++) {
		float *src_pixel = (float *)copy.data + y * w2;
		float *dest_pixel = (float *)dest->data + y * width * 2;
		int x;
		for (x = 0; x < width; x++) {
			dest_pixel[0] = m__convolve_pixel(src_pixel, w2, kx);
			dest_pixel[1] = m__convolve_pixel(src_pixel, w2, ky);
			src_pixel++;
			dest_pixel += 2;
		}
	}
	m_image_destroy(&copy);
}
MIAPI void m_image_harris(struct m_image *dest, const struct m_image *src, float radius)
{
struct m_image tmp1 = M_IMAGE_IDENTITY();
struct m_image tmp2 = M_IMAGE_IDENTITY();
/* sobel */
m_image_sobel(&tmp1, src);
/* sst */
m_image_create(&tmp2, M_FLOAT, src->width, src->height, 3);
m_sst((float *)tmp2.data, (float *)tmp1.data, src->width * src->height);
/* blur */
m_image_copy(&tmp1, &tmp2);
m_image_gaussian_blur(&tmp2, &tmp1, radius, radius);
/* harris response */
m_image_create(dest, M_FLOAT, src->width, src->height, 1);
m_harris_response((float *)dest->data, (float *)tmp2.data, src->width * src->height);
m_image_destroy(&tmp1);
m_image_destroy(&tmp2);
}
/* write value v at (x0, y0) in a 1-component image of row stride w */
#define M_WRITE_PIXEL(dest, x0, y0, v) {*(dest + w * (y0) + (x0)) = v;}
/* flood-fill helper: if (x2, y2) is in bounds, equals ref, and the stack has
   room for one more x/y pair, write 'value' there and push the coordinates;
   when the stack is full the pixel is silently skipped (partial fill) */
#define M_PUSH_PIXEL(x2, y2) if((stack_i+3) < stack_size && m__test_pixel(data, w, h, x2, y2, ref)) {\
stack_i+=2;\
stack[stack_i] = (uint16_t)(x2);\
stack[stack_i+1] = (uint16_t)(y2);\
M_WRITE_PIXEL(data, x2, y2, value);\
}
/* Return 1 when (x, y) lies inside the w*h image and the pixel equals ref,
   0 otherwise (out-of-bounds coordinates are treated as non-matching). */
static int m__test_pixel(uint8_t *src, int w, int h, int x, int y, uint8_t ref)
{
	if (x < 0 || y < 0 || x >= w || y >= h)
		return 0;
	return src[y * w + x] == ref;
}
/* Iterative 4-connected flood fill of a ubyte image, in place.
 * Pixels equal to 'ref' reachable from the seed (x, y) are set to 'value'.
 * 'stack' is caller-provided scratch of 'stack_size' uint16 entries storing
 * x/y pairs; when it runs out, remaining neighbors are skipped (the fill may
 * be partial). Returns 0 when the seed does not match 'ref', 1 otherwise.
 * Note: 'value' must differ from 'ref', otherwise filled pixels keep
 * matching and the loop revisits them forever. */
MIAPI int m_image_floodfill_4x(struct m_image *dest, int x, int y, uint8_t ref, uint8_t value, uint16_t *stack, int stack_size)
{
uint8_t *data = (uint8_t *)dest->data;
int w = dest->width;
int h = dest->height;
int stack_i = 0;
assert(dest->size > 0 && dest->type == M_UBYTE);
if(! m__test_pixel(data, w, h, x, y, ref))
return 0;
/* push and fill the seed */
stack[0] = (uint16_t)x;
stack[1] = (uint16_t)y;
M_WRITE_PIXEL(data, x, y, value);
while (stack_i >= 0) {
/* pop a pixel, then push its 4 unvisited matching neighbors */
x = stack[stack_i];
y = stack[stack_i+1];
stack_i-=2;
M_PUSH_PIXEL(x + 1, y)
M_PUSH_PIXEL(x - 1, y)
M_PUSH_PIXEL(x, y + 1)
M_PUSH_PIXEL(x, y - 1)
}
return 1;
}
/* Iterative 8-connected flood fill of a ubyte image, in place.
 * Same contract as m_image_floodfill_4x, but diagonal neighbors are also
 * filled. Returns 0 when the seed does not match 'ref', 1 otherwise. */
MIAPI int m_image_floodfill_8x(struct m_image *dest, int x, int y, uint8_t ref, uint8_t value, uint16_t *stack, int stack_size)
{
uint8_t *data = (uint8_t *)dest->data;
int w = dest->width;
int h = dest->height;
int stack_i = 0;
assert(dest->size > 0 && dest->type == M_UBYTE);
if(! m__test_pixel(data, w, h, x, y, ref))
return 0;
/* push and fill the seed */
stack[0] = (uint16_t)x;
stack[1] = (uint16_t)y;
M_WRITE_PIXEL(data, x, y, value);
while (stack_i >= 0) {
/* pop a pixel, then push its 8 unvisited matching neighbors */
x = stack[stack_i];
y = stack[stack_i+1];
stack_i-=2;
M_PUSH_PIXEL(x + 1, y)
M_PUSH_PIXEL(x - 1, y)
M_PUSH_PIXEL(x, y + 1)
M_PUSH_PIXEL(x, y - 1)
M_PUSH_PIXEL(x + 1, y + 1)
M_PUSH_PIXEL(x + 1, y - 1)
M_PUSH_PIXEL(x - 1, y + 1)
M_PUSH_PIXEL(x - 1, y - 1)
}
return 1;
}
#undef M_WRITE_PIXEL
#undef M_PUSH_PIXEL
/* Shared worker for dilate / erode / edge extraction on 1-component ubyte
 * images. For every pixel equal to 'ref' whose 4-neighbourhood (clamped at
 * the borders) contains a differing value, 'value' is written into dest.
 * 'copy' selects the starting content of dest: a copy of src (dilate/erode)
 * or all zeros (edge extraction). dest must not alias src — callers handle
 * aliasing with a temporary copy. */
static void m__dilate_erode(struct m_image *dest, const struct m_image *src, uint8_t ref, uint8_t value, int copy)
{
uint8_t *src_data = (uint8_t *)src->data;
uint8_t *src_pixel = src_data;
uint8_t *dest_pixel;
int w = src->width;
int h = src->height;
int y;
assert(src->size > 0 && src->type == M_UBYTE);
m_image_create(dest, M_UBYTE, w, h, 1);
dest_pixel = (uint8_t *)dest->data;
if (copy)
memcpy(dest_pixel, src_data, dest->size * sizeof(char));
else
memset(dest_pixel, 0, dest->size * sizeof(char));
for (y=0; y<h; y++) {
int x;
for (x=0; x<w; x++) {
uint8_t c1, c2, c3, c4, c5;
c1 = *src_pixel;
if (c1 == ref) {
/* 4-neighbours, clamped: out-of-bounds reads use the center value */
c2 = x > 0 ? *(src_data + y * w + (x - 1)) : c1;
c3 = y > 0 ? *(src_data + (y - 1) * w + x) : c1;
c4 = (x + 1) < w ? *(src_data + y * w + x + 1) : c1;
c5 = (y + 1) < h ? *(src_data + (y + 1) * w + x) : c1;
if (c2 != c1 || c3 != c1 || c4 != c1 || c5 != c1)
*dest_pixel = value;
}
src_pixel++;
dest_pixel++;
}
}
}
MIAPI void m_image_dilate(struct m_image *dest, const struct m_image *src)
{
	/* Morphological dilation of a binary ubyte image: zero pixels adjacent
	   to a differing 4-neighbour become 255. dest may alias src. */
	if (dest == src) {
		struct m_image tmp = M_IMAGE_IDENTITY();
		m_image_copy(&tmp, src);
		m__dilate_erode(dest, &tmp, 0, 255, 1);
		m_image_destroy(&tmp);
		return;
	}
	m__dilate_erode(dest, src, 0, 255, 1);
}
/* Morphological erosion of a binary ubyte image: 255-pixels adjacent to a
 * differing 4-neighbour become 0. dest may alias src (copy is made). */
MIAPI void m_image_erode(struct m_image *dest, const struct m_image *src)
{
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m__dilate_erode(dest, &tmp, 255, 0, 1);
m_image_destroy(&tmp);
}
else {
m__dilate_erode(dest, src, 255, 0, 1);
}
}
/* 4-connectivity edge extraction: pixels equal to 'ref' that have at least
 * one differing 4-neighbour are marked 255; everything else is 0.
 * dest may alias src (copy is made). */
MIAPI void m_image_edge_4x(struct m_image *dest, const struct m_image *src, uint8_t ref)
{
if (dest == src) {
struct m_image tmp = M_IMAGE_IDENTITY();
m_image_copy(&tmp, src);
m__dilate_erode(dest, &tmp, ref, 255, 0);
m_image_destroy(&tmp);
}
else {
m__dilate_erode(dest, src, ref, 255, 0);
}
}
/* Following C code from the article
"Efficient Binary Image Thinning using Neighborhood Maps"
by Joseph M. Cychosz, in "Graphics Gems IV", Academic Press, 1994
Thins the image using Rosenfeld's parallel thinning algorithm.
*/
/* Direction m__masks:
N S W E
*/
static int m__masks[] = {0200, 0002, 0040, 0010}; /* octal bit masks: N, S, W, E */
/* True if pixel neighbor map indicates the pixel is 8-simple and
not an end point and thus can be deleted. The neighborhood
map is defined as an integer of bits abcdefghi with a non-zero
bit representing a non-zero pixel. The bit assignment for the
neighborhood is:
a b c
d e f
g h i
*/
/* 512-entry lookup indexed by the 9-bit neighborhood code described above:
   non-zero means the center pixel is 8-simple and not an endpoint, so the
   thinning pass may delete it */
static uint8_t m__delete_map[512] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
/* Thin a binary (0 / non-zero) ubyte image in place to a 1-pixel skeleton
 * using Rosenfeld's parallel thinning algorithm (Cychosz, Graphics Gems IV).
 * Runs N/S/W/E deletion passes repeatedly until a full sweep removes no
 * pixel. Neighborhood codes are the 9-bit maps described above; the octal
 * constants 0666 / 0110 / 0006 select columns of that 3x3 map.
 * NOTE(review): the two malloc results are not checked — assumes allocation
 * succeeds, consistent with the rest of this file. */
MIAPI void m_image_thin(struct m_image *dest)
{
uint8_t *data; /* image data */
uint8_t ** ip; /* scanline pointers, ip[y][x] */
uint8_t * qb; /* Neighborhood maps of previous scanline */
int xsize, ysize; /* Image resolution */
int x, y; /* Pixel location */
int i; /* Pass index */
int pc = 0; /* Pass count */
int count = 1; /* Deleted pixel count */
int p, q; /* Neighborhood maps of adjacent cells */
int m; /* Deletion direction mask */
assert(dest->size > 0 && dest->type == M_UBYTE);
data = (uint8_t *)dest->data;
xsize = dest->width;
ysize = dest->height;
qb = (uint8_t *)malloc(xsize * sizeof(char));
qb[xsize-1] = 0; /* Used for lower-right pixel */
/* alloc scanline pointers */
ip = (uint8_t **)malloc(sizeof(void *) * ysize);
/* set scanline pointers */
for (y=0; y<ysize; y++) {
ip[y] = data + y*xsize;
}
while (count) { /* Scan image while deletions */
pc++;
count = 0;
for (i=0; i<4; i++) {
m = m__masks[i];
/* Build initial previous scan buffer */
p = ip[0][0] != 0;
for (x=0; x<xsize-1; x++) {
p = ((p<<1)&0006) | (ip[0][x+1] != 0);
qb[x] = (uint8_t)p;
}
/* Scan image for pixel deletion candidates */
for (y=0; y<ysize-1; y++) {
q = qb[0];
p = ((q<<3)&0110) | (ip[y+1][0] != 0);
for (x=0; x<xsize-1; x++) {
q = qb[x];
/* shift the window right: keep two columns, add the new one */
p = ((p<<1)&0666) | ((q<<3)&0110) | (ip[y+1][x+1] != 0);
qb[x] = (uint8_t)p;
if (((p&m) == 0) && m__delete_map[p]) {
if (ip[y][x] != 0) {
count++;
ip[y][x] = 0;
}
}
}
/* Process right edge pixel */
p = (p<<1)&0666;
if ((p&m) == 0 && m__delete_map[p]) {
if (ip[y][xsize-1] != 0) {
count++;
ip[y][xsize-1] = 0;
}
}
}
/* Process bottom scan line */
for (x=0; x<xsize; x++) {
q = qb[x];
p = ((p<<1)&0666) | ((q<<3)&0110);
if ((p&m) == 0 && m__delete_map[p]) {
if (ip[ysize-1][x] != 0) {
count++;
ip[ysize-1][x] = 0;
}
}
}
}
}
free(qb);
free(ip);
}
/* Non-maximum suppression on a 1-component float image: a pixel survives
 * only when it is >= threshold and >= every pixel inside the
 * (2*radius+1)^2 window centered on it (clamped at borders); all other
 * pixels are set to 0.
 * NOTE(review): dest == src would make the zeroing writes feed back into
 * the comparisons — confirm callers never pass aliases. */
MIAPI void m_image_non_max_supp(struct m_image *dest, const struct m_image *src, int radius, float threshold)
{
float *src_data, *dest_data;
float *src_pixel, *dest_pixel;
int width = src->width;
int height = src->height;
int x, y;
assert(src->size > 0 && src->type == M_FLOAT && src->comp == 1);
m_image_copy(dest, src);
src_data = (float *)src->data;
dest_data = (float *)dest->data;
src_pixel = src_data;
dest_pixel = dest_data;
for (y = 0; y < height; y++)
for (x = 0; x < width; x++) {
int minx, miny, maxx, maxy, xx, yy;
if (*src_pixel < threshold) {
*dest_pixel = 0;
goto end; /* below threshold: suppressed, skip the window scan */
}
/* window bounds, clamped to the image */
minx = M_MAX(0, x - radius);
miny = M_MAX(0, y - radius);
maxx = M_MIN(width - 1, x + radius);
maxy = M_MIN(height - 1, y + radius);
for (yy = miny; yy <= maxy; yy++)
for (xx = minx; xx <= maxx; xx++) {
float *src_pixel2 = src_data + yy*width + xx;
if (*src_pixel2 > *src_pixel) {
*dest_pixel = 0;
goto end; /* a strictly larger neighbor exists: suppressed */
}
}
end:
src_pixel++;
dest_pixel++;
}
}
/* Detect Harris corners in a float image.
 * Writes up to max_count (x, y) pairs into 'corners', skipping points within
 * 'margin' pixels of the border; returns the number of corners found.
 * 'radius' sets the structure-tensor blur window, 'threshold' the minimum
 * response kept by non-maximum suppression. Returns 0 when the image is too
 * small for the requested margin. */
MIAPI int m_image_corner_harris(const struct m_image *src, int margin, float radius, float threshold, int *corners, int max_count)
{
struct m_image harris = M_IMAGE_IDENTITY();
struct m_image nms = M_IMAGE_IDENTITY();
float *pixel;
int width = src->width;
int height = src->height;
int wm = width - margin;
int hm = height - margin;
int x, y, count;
if (width <= (margin * 2) || height <= (margin * 2))
return 0;
m_image_harris(&harris, src, radius);
m_image_non_max_supp(&nms, &harris, (int)(radius) + 1, threshold);
count = 0;
pixel = (float *)nms.data;
for (y = 0; y < height; y++)
for (x = 0; x < width; x++) {
if (count == max_count)
goto end; /* output array full */
if ((*pixel) > 0 && x >= margin && y >= margin && x < wm && y < hm) {
corners[count*2] = x;
corners[count*2+1] = y;
count++;
}
pixel++;
}
end:
m_image_destroy(&nms);
m_image_destroy(&harris);
return count;
}
MIAPI void m_image_sub_pixel(const struct m_image *src, float x, float y, float *result)
{
	/* Bilinear sample of a float image at (x, y). Coordinates are clamped
	   to the image borders; 'result' receives src->comp interpolated
	   values. */
	float *base = (float *)src->data;
	float *p00, *p10, *p01, *p11;
	int width = src->width;
	int comp = src->comp;
	int last_x = width - 1;
	int last_y = src->height - 1;
	int x0, y0, x1, y1, c;
	float tx, ty;
	x0 = (int)x;
	y0 = (int)y;
	/* fractional part, clamped below zero for negative coordinates */
	tx = x - (float)x0;
	ty = y - (float)y0;
	if (tx < 0) tx = 0;
	if (ty < 0) ty = 0;
	x0 = M_CLAMP(x0, 0, last_x);
	y0 = M_CLAMP(y0, 0, last_y);
	x1 = x0 + 1 > last_x ? last_x : x0 + 1;
	y1 = y0 + 1 > last_y ? last_y : y0 + 1;
	/* the four surrounding texels */
	p00 = base + (y0 * width + x0) * comp;
	p10 = base + (y0 * width + x1) * comp;
	p01 = base + (y1 * width + x0) * comp;
	p11 = base + (y1 * width + x1) * comp;
	for (c = 0; c < comp; c++) {
		float left = p00[c] + (p01[c] - p00[c]) * ty;
		float right = p10[c] + (p11[c] - p10[c]) * ty;
		result[c] = left + (right - left) * tx;
	}
}
/* slow TODO better */
/* Fill an already-created 'dest' by bilinearly sampling 'src': each
 * destination pixel center maps to ((x + 0.5) * dx + offset,
 * (y + 0.5) * dy + offset) in source coordinates.
 * dest must have src's component count. */
static void m__bilinear(struct m_image *dest, const struct m_image *src, float dx, float dy, float offset)
{
float *dest_data = (float *)dest->data;
int width = dest->width;
int height = dest->height;
int comp = src->comp;
int y, ystep = width * comp;
#pragma omp parallel for schedule(dynamic, 8)
for (y = 0; y < height; y++) {
float *dest_pixel = dest_data + y * ystep; int x;
for (x = 0; x < width; x++) {
m_image_sub_pixel(src, ((float)x + 0.5f) * dx + offset, ((float)y + 0.5f) * dy + offset, dest_pixel);
dest_pixel += comp;
}
}
}
/* Half-size downsampling for image pyramids: gaussian blur (amount 1.5 on
 * both axes) followed by dropping every second pixel and every second row.
 * Output is (width/2) x (height/2); the blurred temporary makes this safe
 * when dest aliases src. */
MIAPI void m_image_pyrdown(struct m_image *dest, const struct m_image *src)
{
struct m_image tmp = M_IMAGE_IDENTITY();
float *src_data;
float *dest_pixel;
int width = src->width;
int height = src->height;
int comp = src->comp;
int comp2 = comp * 2; /* skip one pixel horizontally */
int ystep = width * comp * 2; /* skip one row vertically */
int w2 = width / 2;
int h2 = height / 2;
int x, y, i;
m_image_gaussian_blur(&tmp, src, 1.5f, 1.5f);
m_image_create(dest, M_FLOAT, w2, h2, comp);
src_data = (float *)tmp.data;
dest_pixel = (float *)dest->data;
for (y = 0; y < h2; y++) {
float *src_pixel = src_data + y * ystep;
for (x = 0; x < w2; x++) {
for (i = 0; i < comp; i++)
dest_pixel[i] = src_pixel[i];
dest_pixel += comp;
src_pixel += comp2;
}
}
m_image_destroy(&tmp);
}
/* Resize a float image to new_width x new_height with bilinear sampling.
 * When downscaling (ratio > 1 on either axis), a gaussian pre-blur
 * proportional to the ratio limits aliasing. dest may alias src. */
MIAPI void m_image_resize(struct m_image *dest, const struct m_image *src, int new_width, int new_height)
{
struct m_image tmp = M_IMAGE_IDENTITY();
int width = src->width;
int height = src->height;
int comp = src->comp;
float rx = (float)width / (float)new_width;
float ry = (float)height / (float)new_height;
assert(src->size > 0 && src->type == M_FLOAT);
if (rx > 1.0f || ry > 1.0f) {
/* downscale: blur first (tmp also shields a dest == src alias) */
m_image_gaussian_blur(&tmp, src, M_MAX(0.0f, rx - 1.0f), M_MAX(0.0f, ry - 1.0f));
m_image_create(dest, M_FLOAT, new_width, new_height, comp);
m__bilinear(dest, &tmp, rx, ry, -0.5f);
}
else {
if (dest == src) {
/* in-place upscale: sample from a copy */
m_image_copy(&tmp, src);
src = &tmp;
}
m_image_create(dest, M_FLOAT, new_width, new_height, comp);
m__bilinear(dest, src, rx, ry, -0.5f);
}
m_image_destroy(&tmp);
}
#endif /* M_IMAGE_IMPLEMENTATION */