// zq_cnn_convolution_gemm_nchwc_kernel1x1_neon_raw.h
#define __ARMV8 1
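// 1x1 stride-1 convolution expressed as a single SGEMM:
// for every output channel p and output pixel i,
//   top[p][i] = bias[p] + sum_q kernel[p * inch + q] * bottom[q][i]
// i.e. an (outch x inch) by (inch x w*h) matrix multiply. The helpers below
// pack the weights and the input so the NEON micro-kernels can stream them.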
static void conv1x1s1_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
const float* kernel = _kernel;
// interleave
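// Pack the (outch x inch) weight matrix so output channels are interleaved
// in groups of 8 (ARMv8 path), then 4, then singly; each packed channel
// stores inch consecutive groups, giving contiguous kernel reads in the
// GEMM micro-kernels below.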
#if __ARMV8
kernel_tm.create(4 * 8, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4);
#endif // __ARMV8
int p = 0;
#if __ARMV8
for (; p + 7<outch; p += 8)
{
const float* kernel0 = kernel + (p + 0)*inch;
const float* kernel1 = kernel + (p + 1)*inch;
const float* kernel2 = kernel + (p + 2)*inch;
const float* kernel3 = kernel + (p + 3)*inch;
const float* kernel4 = kernel + (p + 4)*inch;
const float* kernel5 = kernel + (p + 5)*inch;
const float* kernel6 = kernel + (p + 6)*inch;
const float* kernel7 = kernel + (p + 7)*inch;
float* ktmp = kernel_tm.channel(p / 8);
for (int q = 0; q<inch; q++)
{
// kernel0...7 0
ktmp[0] = kernel0[0];
ktmp[1] = kernel1[0];
ktmp[2] = kernel2[0];
ktmp[3] = kernel3[0];
ktmp[4] = kernel4[0];
ktmp[5] = kernel5[0];
ktmp[6] = kernel6[0];
ktmp[7] = kernel7[0];
ktmp += 8;
kernel0 += 1;
kernel1 += 1;
kernel2 += 1;
kernel3 += 1;
kernel4 += 1;
kernel5 += 1;
kernel6 += 1;
kernel7 += 1;
}
}
#endif // __ARMV8
for (; p + 3<outch; p += 4)
{
const float* kernel0 = kernel + (p + 0)*inch;
const float* kernel1 = kernel + (p + 1)*inch;
const float* kernel2 = kernel + (p + 2)*inch;
const float* kernel3 = kernel + (p + 3)*inch;
#if __ARMV8
float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4);
#else
float* ktmp = kernel_tm.channel(p / 4);
#endif // __ARMV8
for (int q = 0; q<inch; q++)
{
// kernel0...3 0
ktmp[0] = kernel0[0];
ktmp[1] = kernel1[0];
ktmp[2] = kernel2[0];
ktmp[3] = kernel3[0];
ktmp += 4;
kernel0 += 1;
kernel1 += 1;
kernel2 += 1;
kernel3 += 1;
}
}
for (; p<outch; p++)
{
const float* kernel0 = kernel + p*inch;
#if __ARMV8
float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
float* ktmp = kernel_tm.channel(p / 4 + p % 4);
#endif //__ARMV8
for (int q = 0; q<inch; q++)
{
ktmp[0] = kernel0[0];
ktmp++;
kernel0++;
}
}
}
static void conv1x1s1_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// interleave
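// Repack the input feature map (inch x size) into pixel tiles of 8, 4 and 1
// so each micro-kernel streams its columns from one pointer; the
// tmp.channel(...) indexing used later mirrors this 8/4/1 layout.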
Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 4u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii<nn_size; ii++)
{
int i = ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q<inch; q++)
{
#if __ARMV8
vst1q_f32(tmpptr, vld1q_f32(img0));
vst1q_f32(tmpptr + 4, vld1q_f32(img0 + 4));
tmpptr += 8;
img0 += bottom_blob.cstep;
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1"
);
img0 += bottom_blob.cstep;
#endif // __ARMV8
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q<inch; q++)
{
#if __ARMV8
vst1q_f32(tmpptr, vld1q_f32(img0));
tmpptr += 4;
img0 += bottom_blob.cstep;
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0"
);
img0 += bottom_blob.cstep;
#endif // __ARMV8
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i<size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
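// GEMM proper: output channels are handled 8 at a time (ARMv8 inline
// assembly), then 4 at a time, then singly; within each block the output
// pixels are consumed in tiles of 8, 4 and 1 matching the packing above.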
#if __ARMV8
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp<nn_outch; pp++)
{
int p = pp * 8;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
float* outptr4 = top_blob.channel(p + 4);
float* outptr5 = top_blob.channel(p + 5);
float* outptr6 = top_blob.channel(p + 6);
float* outptr7 = top_blob.channel(p + 7);
const float zeros[8] = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f };
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
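// 8x8 micro-kernel: v16-v31 hold an 8-output-channel by 8-pixel tile of
// accumulators; the inch loop consumes four input channels per iteration
// and a remainder loop handles inch % 4.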
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[0] \n"
"dup v18.4s, v0.s[1] \n"
"dup v19.4s, v0.s[1] \n"
"dup v20.4s, v0.s[2] \n"
"dup v21.4s, v0.s[2] \n"
"dup v22.4s, v0.s[3] \n"
"dup v23.4s, v0.s[3] \n"
"dup v24.4s, v1.s[0] \n"
"dup v25.4s, v1.s[0] \n"
"dup v26.4s, v1.s[1] \n"
"dup v27.4s, v1.s[1] \n"
"dup v28.4s, v1.s[2] \n"
"dup v29.4s, v1.s[2] \n"
"dup v30.4s, v1.s[3] \n"
"dup v31.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[1] \n"
"dup v18.4s, v0.s[2] \n"
"dup v19.4s, v0.s[3] \n"
"dup v20.4s, v1.s[0] \n"
"dup v21.4s, v1.s[1] \n"
"dup v22.4s, v1.s[2] \n"
"dup v23.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
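// Single-pixel tail: partial sums in v16-v23 are reduced with fadd onto the
// bias in v24/v25 and the eight per-channel results are stored lane by lane.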
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v24.4s, v25.4s}, [%20] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v0.4s, v8.s[0] \n"
"fmla v17.4s, v1.4s, v8.s[0] \n"
"fmla v18.4s, v2.4s, v8.s[1] \n"
"fmla v19.4s, v3.4s, v8.s[1] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v4.4s, v8.s[2] \n"
"fmla v21.4s, v5.4s, v8.s[2] \n"
"fmla v22.4s, v6.4s, v8.s[3] \n"
"fmla v23.4s, v7.4s, v8.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"fadd v16.4s, v16.4s, v20.4s \n"
"fadd v17.4s, v17.4s, v21.4s \n"
"fadd v24.4s, v24.4s, v16.4s \n"
"fadd v25.4s, v25.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #32] \n"
"ld1r {v8.4s}, [%8], #4 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v0.4s \n"
"fmla v25.4s, v8.4s, v1.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v24.s}[0],[%0], #4 \n"
"st1 {v24.s}[1],[%1], #4 \n"
"st1 {v24.s}[2],[%2], #4 \n"
"st1 {v24.s}[3],[%3], #4 \n"
"st1 {v25.s}[0],[%4], #4 \n"
"st1 {v25.s}[1],[%5], #4 \n"
"st1 {v25.s}[2],[%6], #4 \n"
"st1 {v25.s}[3],[%7], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"
);
}
}
#endif // __ARMV8
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = { 0.f, 0.f, 0.f, 0.f };
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
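// 4-output-channel block: ARMv8 and ARMv7 NEON variants of the same kernel,
// computing a 4x8 tile (accumulators v8-v15 / q8-q15), with 4-pixel and
// 1-pixel tails following the same pattern.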
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[0] \n"
"dup v10.4s, v0.s[1] \n"
"dup v11.4s, v0.s[1] \n"
"dup v12.4s, v0.s[2] \n"
"dup v13.4s, v0.s[2] \n"
"dup v14.4s, v0.s[3] \n"
"dup v15.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[0] \n"
"vdup.f32 q10, d0[1] \n"
"vdup.f32 q11, d0[1] \n"
"vdup.f32 q12, d1[0] \n"
"vdup.f32 q13, d1[0] \n"
"vdup.f32 q14, d1[1] \n"
"vdup.f32 q15, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
"vst1.f32 {d24-d27}, [%2 :128]! \n"
"vst1.f32 {d28-d31}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __ARMV8
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[1] \n"
"dup v10.4s, v0.s[2] \n"
"dup v11.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[1] \n"
"vdup.f32 q10, d1[0] \n"
"vdup.f32 q11, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __ARMV8
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v12.4s}, [%12] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v12.4s, v12.4s, v8.4s \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #32] \n"
"ld1r {v4.4s}, [%4], #4 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v12.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v12.s}[0], [%0], #4 \n"
"st1 {v12.s}[1], [%1], #4 \n"
"st1 {v12.s}[2], [%2], #4 \n"
"st1 {v12.s}[3], [%3], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d24-d25}, [%12] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"0: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q12, q12, q8 \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #32] \n"
"vld1.f32 {d8[],d9[]}, [%4]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d24[0]}, [%0]! \n"
"vst1.f32 {d24[1]}, [%1]! \n"
"vst1.f32 {d25[0]}, [%2]! \n"
"vst1.f32 {d25[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12"
);
#endif // __ARMV8
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float* outptr0 = out0;
int i = 0;
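// Remaining output channels one at a time: a vectorised dot product of the
// packed input tile with a single kernel row, with 8-, 4- and 1-pixel tails.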
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"dup v8.4s, %w6 \n"
"dup v9.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"
);
#else // __ARMV8
asm volatile(
"vdup.f32 q8, %6 \n"
"vdup.f32 q9, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
#endif // __ARMV8
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"dup v8.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"
);
#else // __ARMV8
asm volatile(
"vdup.f32 q8, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #128] \n"
"vld1.f32 {d8-d9}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif // __ARMV8
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
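// Last pixels of the last channels: intrinsics accumulate four input
// channels per step, then a horizontal add plus a scalar loop finish the
// dot product.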
int q = 0;
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q + 3<inch; q += 4)
{
float32x4_t _p0 = vld1q_f32(tmpptr);
tmpptr += 4;
float32x4_t _k0 = vld1q_f32(kptr);
kptr += 4;
#if __ARMV8
_sum0 = vfmaq_f32(_sum0, _p0, _k0);
#else
_sum0 = vmlaq_f32(_sum0, _p0, _k0);
#endif
}
#if __ARMV8
float sum0 = bias0 + vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0);
#endif
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
// convolution_3x3_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
// winograd42 transform kernel
Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
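// U = G * g * G^T for the Winograd F(4x4, 3x3) transform, evaluated in two
// passes over each 3x3 kernel g: rows into tmp first, then columns into the
// 6x6 block kernel_tm0.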
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 8a-inch/8a-36-outch
kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 8 + outch % 8, (size_t)2u * 8, 8);
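// Interleave the transformed kernels of eight output channels per packed
// row so the dot-product kernels below read the int16 weights contiguously.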
int p = 0;
for (; p + 7 < outch; p += 8)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
const Mat k4 = kernel_tm.channel(p + 4);
const Mat k5 = kernel_tm.channel(p + 5);
const Mat k6 = kernel_tm.channel(p + 6);
const Mat k7 = kernel_tm.channel(p + 7);
Mat g0 = kernel_tm_pack8to1.channel(p / 8);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00[1] = k1.row<const short>(q + i)[k];
g00[2] = k2.row<const short>(q + i)[k];
g00[3] = k3.row<const short>(q + i)[k];
g00[4] = k4.row<const short>(q + i)[k];
g00[5] = k5.row<const short>(q + i)[k];
g00[6] = k6.row<const short>(q + i)[k];
g00[7] = k7.row<const short>(q + i)[k];
g00 += 8;
}
}
}
}
for (; p < outch; p++)
{
const Mat k0 = kernel_tm.channel(p);
Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00 += 1;
}
}
}
}
}
static void conv3x3s1_winograd42_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
// size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
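// The input transform runs in 16-bit integer arithmetic: the int8 rows are
// widened with vmull/vaddl/vsubl and combined per the formulas above, applied
// once per dimension (into tmp, then into r0_tm_0..5).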
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
short tmp[6][6][8];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;
for (int m = 0; m < 6; m++)
{
int8x8_t _r00 = vld1_s8(r0);
int8x8_t _r01 = vld1_s8(r0 + 8);
int8x8_t _r02 = vld1_s8(r0 + 16);
int8x8_t _r03 = vld1_s8(r0 + 24);
int8x8_t _r04 = vld1_s8(r0 + 32);
int8x8_t _r05 = vld1_s8(r0 + 40);
int8x8_t _v4s8 = vdup_n_s8(4);
int8x8_t _v5s8 = vdup_n_s8(5);
int16x8_t _v2 = vdupq_n_s16(2);
int16x8_t _v4 = vdupq_n_s16(4);
// int16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f);
int16x8_t _tmp0m = vsubq_s16(vaddw_s8(vmull_s8(_r00, _v4s8), _r04), vmull_s8(_r02, _v5s8));
// int16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f);
int16x8_t _tmp1m = vmlsq_s16(vaddl_s8(_r04, _r03), vaddl_s8(_r01, _r02), _v4);
// int16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f);
int16x8_t _tmp2m = vmlaq_s16(vsubl_s8(_r04, _r03), vsubl_s8(_r01, _r02), _v4);
// int16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
int16x8_t _tmp3m = vmlsq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2);
// int16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
int16x8_t _tmp4m = vmlaq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2);
// int16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f);
int16x8_t _tmp5m = vsubq_s16(vaddw_s8(vmull_s8(_r01, _v4s8), _r05), vmull_s8(_r03, _v5s8));
vst1q_s16(tmp[0][m], _tmp0m);
vst1q_s16(tmp[1][m], _tmp1m);
vst1q_s16(tmp[2][m], _tmp2m);
vst1q_s16(tmp[3][m], _tmp3m);
vst1q_s16(tmp[4][m], _tmp4m);
vst1q_s16(tmp[5][m], _tmp5m);
r0 += w * 8;
}
short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
short* r0_tm_1 = r0_tm_0 + tiles * 8;
short* r0_tm_2 = r0_tm_0 + tiles * 16;
short* r0_tm_3 = r0_tm_0 + tiles * 24;
short* r0_tm_4 = r0_tm_0 + tiles * 32;
short* r0_tm_5 = r0_tm_0 + tiles * 40;
for (int m = 0; m < 6; m++)
{
int16x8_t _tmp00 = vld1q_s16(tmp[m][0]);
int16x8_t _tmp01 = vld1q_s16(tmp[m][1]);
int16x8_t _tmp02 = vld1q_s16(tmp[m][2]);
int16x8_t _tmp03 = vld1q_s16(tmp[m][3]);
int16x8_t _tmp04 = vld1q_s16(tmp[m][4]);
int16x8_t _tmp05 = vld1q_s16(tmp[m][5]);
int16x8_t _v2 = vdupq_n_s16(2);
int16x8_t _v4 = vdupq_n_s16(4);
int16x8_t _v5 = vdupq_n_s16(5);
int16x8_t _r0tm0 = vmlsq_s16(vmlaq_s16(_tmp04, _tmp00, _v4), _tmp02, _v5);
int16x8_t _r0tm1 = vmlsq_s16(vaddq_s16(_tmp04, _tmp03), vaddq_s16(_tmp01, _tmp02), _v4);
int16x8_t _r0tm2 = vmlaq_s16(vsubq_s16(_tmp04, _tmp03), vsubq_s16(_tmp01, _tmp02), _v4);
int16x8_t _r0tm3 = vmlsq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2);
int16x8_t _r0tm4 = vmlaq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2);
int16x8_t _r0tm5 = vmlsq_s16(vmlaq_s16(_tmp05, _tmp01, _v4), _tmp03, _v5);
vst1q_s16(r0_tm_0, _r0tm0);
vst1q_s16(r0_tm_1, _r0tm1);
vst1q_s16(r0_tm_2, _r0tm2);
vst1q_s16(r0_tm_3, _r0tm3);
vst1q_s16(r0_tm_4, _r0tm4);
vst1q_s16(r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 48;
r0_tm_1 += tiles * 48;
r0_tm_2 += tiles * 48;
r0_tm_3 += tiles * 48;
r0_tm_4 += tiles * 48;
r0_tm_5 += tiles * 48;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
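// Regroup the transformed input tiles into blocks of 8 (ARMv8), 4 and 1 so
// each dot micro-kernel reads one contiguous row per transform position r.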
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#else
if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __aarch64__
for (; i + 7 < tiles; i += 8)
{
short* tm2p = tm2.row<short>(i / 8);
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
#endif
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
short* tm2p = tm2.row<short>(i / 8 + (i % 8) / 4);
#else
short* tm2p = tm2.row<short>(i / 4);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x4
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vswp d1, d2 \n"
"vswp d5, d6 \n"
"vswp q1, q2 \n"
"vst4.s16 {d0-d3}, [%1 :64]! \n"
"vst4.s16 {d4-d7}, [%1 :64]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
#if __aarch64__
short* tm2p = tm2.row<short>(i / 8 + (i % 8) / 4 + i % 4);
#else
short* tm2p = tm2.row<short>(i / 4 + i % 4);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.s16 {d0-d1}, [%0 :64] \n"
"vst1.s16 {d0-d1}, [%1 :64]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
const Mat kernel01_tm = kernel_tm.channel(p / 8);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
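// 8-output-channel by 8-tile micro-kernel (ARMv8 only): smlal/smlal2 widen
// the int16 products into the int32 accumulators v16-v31 over all input
// channels.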
#if __aarch64__
for (; i + 7 < tiles; i += 8)
{
const short* r0 = bb2.row<const short>(i / 8);
const short* kptr = kernel01_tm.row<const short>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
"smlal v16.4s, v8.4h, v0.h[0] \n"
"smlal2 v17.4s, v8.8h, v0.h[0] \n"
"smlal v18.4s, v8.4h, v0.h[1] \n"
"smlal2 v19.4s, v8.8h, v0.h[1] \n"
"smlal v20.4s, v8.4h, v0.h[2] \n"
"smlal2 v21.4s, v8.8h, v0.h[2] \n"
"smlal v22.4s, v8.4h, v0.h[3] \n"
"smlal2 v23.4s, v8.8h, v0.h[3] \n"
"smlal v24.4s, v8.4h, v0.h[4] \n"
"smlal2 v25.4s, v8.8h, v0.h[4] \n"
"smlal v26.4s, v8.4h, v0.h[5] \n"
"smlal2 v27.4s, v8.8h, v0.h[5] \n"
"smlal v28.4s, v8.4h, v0.h[6] \n"
"smlal2 v29.4s, v8.8h, v0.h[6] \n"
"smlal v30.4s, v8.4h, v0.h[7] \n"
"smlal2 v31.4s, v8.8h, v0.h[7] \n"
"smlal v16.4s, v9.4h, v1.h[0] \n"
"smlal2 v17.4s, v9.8h, v1.h[0] \n"
"smlal v18.4s, v9.4h, v1.h[1] \n"
"smlal2 v19.4s, v9.8h, v1.h[1] \n"
"smlal v20.4s, v9.4h, v1.h[2] \n"
"smlal2 v21.4s, v9.8h, v1.h[2] \n"
"smlal v22.4s, v9.4h, v1.h[3] \n"
"smlal2 v23.4s, v9.8h, v1.h[3] \n"
"smlal v24.4s, v9.4h, v1.h[4] \n"
"smlal2 v25.4s, v9.8h, v1.h[4] \n"
"smlal v26.4s, v9.4h, v1.h[5] \n"
"smlal2 v27.4s, v9.8h, v1.h[5] \n"
"smlal v28.4s, v9.4h, v1.h[6] \n"
"smlal2 v29.4s, v9.8h, v1.h[6] \n"
"smlal v30.4s, v9.4h, v1.h[7] \n"
"smlal2 v31.4s, v9.8h, v1.h[7] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%9], #64 \n"
"smlal v16.4s, v10.4h, v2.h[0] \n"
"smlal2 v17.4s, v10.8h, v2.h[0] \n"
"smlal v18.4s, v10.4h, v2.h[1] \n"
"smlal2 v19.4s, v10.8h, v2.h[1] \n"
"smlal v20.4s, v10.4h, v2.h[2] \n"
"smlal2 v21.4s, v10.8h, v2.h[2] \n"
"smlal v22.4s, v10.4h, v2.h[3] \n"
"smlal2 v23.4s, v10.8h, v2.h[3] \n"
"smlal v24.4s, v10.4h, v2.h[4] \n"
"smlal2 v25.4s, v10.8h, v2.h[4] \n"
"smlal v26.4s, v10.4h, v2.h[5] \n"
"smlal2 v27.4s, v10.8h, v2.h[5] \n"
"smlal v28.4s, v10.4h, v2.h[6] \n"
"smlal2 v29.4s, v10.8h, v2.h[6] \n"
"smlal v30.4s, v10.4h, v2.h[7] \n"
"smlal2 v31.4s, v10.8h, v2.h[7] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
"smlal v16.4s, v11.4h, v3.h[0] \n"
"smlal2 v17.4s, v11.8h, v3.h[0] \n"
"smlal v18.4s, v11.4h, v3.h[1] \n"
"smlal2 v19.4s, v11.8h, v3.h[1] \n"
"smlal v20.4s, v11.4h, v3.h[2] \n"
"smlal2 v21.4s, v11.8h, v3.h[2] \n"
"smlal v22.4s, v11.4h, v3.h[3] \n"
"smlal2 v23.4s, v11.8h, v3.h[3] \n"
"smlal v24.4s, v11.4h, v3.h[4] \n"
"smlal2 v25.4s, v11.8h, v3.h[4] \n"
"smlal v26.4s, v11.4h, v3.h[5] \n"
"smlal2 v27.4s, v11.8h, v3.h[5] \n"
"smlal v28.4s, v11.4h, v3.h[6] \n"
"smlal2 v29.4s, v11.8h, v3.h[6] \n"
"smlal v30.4s, v11.4h, v3.h[7] \n"
"smlal2 v31.4s, v11.8h, v3.h[7] \n"
"smlal v16.4s, v12.4h, v4.h[0] \n"
"smlal2 v17.4s, v12.8h, v4.h[0] \n"
"smlal v18.4s, v12.4h, v4.h[1] \n"
"smlal2 v19.4s, v12.8h, v4.h[1] \n"
"smlal v20.4s, v12.4h, v4.h[2] \n"
"smlal2 v21.4s, v12.8h, v4.h[2] \n"
"smlal v22.4s, v12.4h, v4.h[3] \n"
"smlal2 v23.4s, v12.8h, v4.h[3] \n"
"smlal v24.4s, v12.4h, v4.h[4] \n"
"smlal2 v25.4s, v12.8h, v4.h[4] \n"
"smlal v26.4s, v12.4h, v4.h[5] \n"
"smlal2 v27.4s, v12.8h, v4.h[5] \n"
"smlal v28.4s, v12.4h, v4.h[6] \n"
"smlal2 v29.4s, v12.8h, v4.h[6] \n"
"smlal v30.4s, v12.4h, v4.h[7] \n"
"smlal2 v31.4s, v12.8h, v4.h[7] \n"
"smlal v16.4s, v13.4h, v5.h[0] \n"
"smlal2 v17.4s, v13.8h, v5.h[0] \n"
"smlal v18.4s, v13.4h, v5.h[1] \n"
"smlal2 v19.4s, v13.8h, v5.h[1] \n"
"smlal v20.4s, v13.4h, v5.h[2] \n"
"smlal2 v21.4s, v13.8h, v5.h[2] \n"
"smlal v22.4s, v13.4h, v5.h[3] \n"
"smlal2 v23.4s, v13.8h, v5.h[3] \n"
"smlal v24.4s, v13.4h, v5.h[4] \n"
"smlal2 v25.4s, v13.8h, v5.h[4] \n"
"smlal v26.4s, v13.4h, v5.h[5] \n"
"smlal2 v27.4s, v13.8h, v5.h[5] \n"
"smlal v28.4s, v13.4h, v5.h[6] \n"
"smlal2 v29.4s, v13.8h, v5.h[6] \n"
"smlal v30.4s, v13.4h, v5.h[7] \n"
"smlal2 v31.4s, v13.8h, v5.h[7] \n"
"smlal v16.4s, v14.4h, v6.h[0] \n"
"smlal2 v17.4s, v14.8h, v6.h[0] \n"
"smlal v18.4s, v14.4h, v6.h[1] \n"
"smlal2 v19.4s, v14.8h, v6.h[1] \n"
"smlal v20.4s, v14.4h, v6.h[2] \n"
"smlal2 v21.4s, v14.8h, v6.h[2] \n"
"smlal v22.4s, v14.4h, v6.h[3] \n"
"smlal2 v23.4s, v14.8h, v6.h[3] \n"
"smlal v24.4s, v14.4h, v6.h[4] \n"
"smlal2 v25.4s, v14.8h, v6.h[4] \n"
"smlal v26.4s, v14.4h, v6.h[5] \n"
"smlal2 v27.4s, v14.8h, v6.h[5] \n"
"smlal v28.4s, v14.4h, v6.h[6] \n"
"smlal2 v29.4s, v14.8h, v6.h[6] \n"
"smlal v30.4s, v14.4h, v6.h[7] \n"
"smlal2 v31.4s, v14.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"smlal v16.4s, v15.4h, v7.h[0] \n"
"smlal2 v17.4s, v15.8h, v7.h[0] \n"
"smlal v18.4s, v15.4h, v7.h[1] \n"
"smlal2 v19.4s, v15.8h, v7.h[1] \n"
"smlal v20.4s, v15.4h, v7.h[2] \n"
"smlal2 v21.4s, v15.8h, v7.h[2] \n"
"smlal v22.4s, v15.4h, v7.h[3] \n"
"smlal2 v23.4s, v15.8h, v7.h[3] \n"
"smlal v24.4s, v15.4h, v7.h[4] \n"
"smlal2 v25.4s, v15.8h, v7.h[4] \n"
"smlal v26.4s, v15.4h, v7.h[5] \n"
"smlal2 v27.4s, v15.8h, v7.h[5] \n"
"smlal v28.4s, v15.4h, v7.h[6] \n"
"smlal2 v29.4s, v15.8h, v7.h[6] \n"
"smlal v30.4s, v15.4h, v7.h[7] \n"
"smlal2 v31.4s, v15.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
"st1 {v20.4s, v21.4s}, [%3], #32 \n"
"st1 {v22.4s, v23.4s}, [%4], #32 \n"
"st1 {v24.4s, v25.4s}, [%5], #32 \n"
"st1 {v26.4s, v27.4s}, [%6], #32 \n"
"st1 {v28.4s, v29.4s}, [%7], #32 \n"
"st1 {v30.4s, v31.4s}, [%8], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#endif
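// 4-tile variant shared by ARMv7 and ARMv8, built on vmlal_lane_s16.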
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4);
#else
const short* r0 = bb2.row<const short>(i / 4);
#endif
const short* k0 = kernel01_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _val1 = vld1q_s16(r0 + 8);
int16x8_t _val2 = vld1q_s16(r0 + 16);
int16x8_t _val3 = vld1q_s16(r0 + 24);
int16x8_t _w0 = vld1q_s16(k0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val0), vget_low_s16(_w0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val0), vget_low_s16(_w0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val0), vget_low_s16(_w0), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val0), vget_low_s16(_w0), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val0), vget_high_s16(_w0), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val0), vget_high_s16(_w0), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val0), vget_high_s16(_w0), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val0), vget_high_s16(_w0), 3);
int16x8_t _w1 = vld1q_s16(k0 + 8);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val0), vget_low_s16(_w1), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val0), vget_low_s16(_w1), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val0), vget_low_s16(_w1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val0), vget_low_s16(_w1), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val0), vget_high_s16(_w1), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val0), vget_high_s16(_w1), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val0), vget_high_s16(_w1), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val0), vget_high_s16(_w1), 3);
int16x8_t _w2 = vld1q_s16(k0 + 16);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val1), vget_low_s16(_w2), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val1), vget_low_s16(_w2), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val1), vget_low_s16(_w2), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val1), vget_low_s16(_w2), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val1), vget_high_s16(_w2), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val1), vget_high_s16(_w2), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val1), vget_high_s16(_w2), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val1), vget_high_s16(_w2), 3);
int16x8_t _w3 = vld1q_s16(k0 + 24);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val1), vget_low_s16(_w3), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val1), vget_low_s16(_w3), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val1), vget_low_s16(_w3), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val1), vget_low_s16(_w3), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val1), vget_high_s16(_w3), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val1), vget_high_s16(_w3), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val1), vget_high_s16(_w3), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val1), vget_high_s16(_w3), 3);
int16x8_t _w4 = vld1q_s16(k0 + 32);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val2), vget_low_s16(_w4), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val2), vget_low_s16(_w4), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val2), vget_low_s16(_w4), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val2), vget_low_s16(_w4), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val2), vget_high_s16(_w4), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val2), vget_high_s16(_w4), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val2), vget_high_s16(_w4), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val2), vget_high_s16(_w4), 3);
int16x8_t _w5 = vld1q_s16(k0 + 40);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val2), vget_low_s16(_w5), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val2), vget_low_s16(_w5), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val2), vget_low_s16(_w5), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val2), vget_low_s16(_w5), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val2), vget_high_s16(_w5), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val2), vget_high_s16(_w5), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val2), vget_high_s16(_w5), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val2), vget_high_s16(_w5), 3);
int16x8_t _w6 = vld1q_s16(k0 + 48);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val3), vget_low_s16(_w6), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val3), vget_low_s16(_w6), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val3), vget_low_s16(_w6), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val3), vget_low_s16(_w6), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val3), vget_high_s16(_w6), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val3), vget_high_s16(_w6), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val3), vget_high_s16(_w6), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val3), vget_high_s16(_w6), 3);
int16x8_t _w7 = vld1q_s16(k0 + 56);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val3), vget_low_s16(_w7), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val3), vget_low_s16(_w7), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val3), vget_low_s16(_w7), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val3), vget_low_s16(_w7), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val3), vget_high_s16(_w7), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val3), vget_high_s16(_w7), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val3), vget_high_s16(_w7), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val3), vget_high_s16(_w7), 3);
r0 += 32;
k0 += 64;
}
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output1_tm, _sum1);
vst1q_s32(output2_tm, _sum2);
vst1q_s32(output3_tm, _sum3);
vst1q_s32(output4_tm, _sum4);
vst1q_s32(output5_tm, _sum5);
vst1q_s32(output6_tm, _sum6);
vst1q_s32(output7_tm, _sum7);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
output4_tm += 4;
output5_tm += 4;
output6_tm += 4;
output7_tm += 4;
}
for (; i < tiles; i++)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4 + i % 4);
#else
const short* r0 = bb2.row<const short>(i / 4 + i % 4);
#endif
const short* k0 = kernel01_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _w0 = vld1q_s16(k0);
int16x8_t _w1 = vld1q_s16(k0 + 8);
int16x8_t _w2 = vld1q_s16(k0 + 16);
int16x8_t _w3 = vld1q_s16(k0 + 24);
int16x8_t _w4 = vld1q_s16(k0 + 32);
int16x8_t _w5 = vld1q_s16(k0 + 40);
int16x8_t _w6 = vld1q_s16(k0 + 48);
int16x8_t _w7 = vld1q_s16(k0 + 56);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3);
r0 += 8;
k0 += 64;
}
output0_tm[0] = vgetq_lane_s32(_sum0, 0);
output1_tm[0] = vgetq_lane_s32(_sum0, 1);
output2_tm[0] = vgetq_lane_s32(_sum0, 2);
output3_tm[0] = vgetq_lane_s32(_sum0, 3);
output4_tm[0] = vgetq_lane_s32(_sum1, 0);
output5_tm[0] = vgetq_lane_s32(_sum1, 1);
output6_tm[0] = vgetq_lane_s32(_sum1, 2);
output7_tm[0] = vgetq_lane_s32(_sum1, 3);
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
output4_tm += 1;
output5_tm += 1;
output6_tm += 1;
output7_tm += 1;
}
}
}
remain_outch_start += nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 7 < tiles; i += 8)
{
const short* r0 = bb2.row<const short>(i / 8);
const short* kptr = kernel0_tm.row<const short>(r);
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int q = 0; q < inch; q++)
{
int16x8_t _r0 = vld1q_s16(r0);
int16x8_t _r1 = vld1q_s16(r0 + 8);
int16x8_t _r2 = vld1q_s16(r0 + 16);
int16x8_t _r3 = vld1q_s16(r0 + 24);
int16x8_t _r4 = vld1q_s16(r0 + 32);
int16x8_t _r5 = vld1q_s16(r0 + 40);
int16x8_t _r6 = vld1q_s16(r0 + 48);
int16x8_t _r7 = vld1q_s16(r0 + 56);
int16x8_t _k0 = vld1q_s16(kptr);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r0), vget_low_s16(_k0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r1), vget_low_s16(_k0), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r1), vget_low_s16(_k0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2), vget_low_s16(_k0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r2), vget_low_s16(_k0), 2);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r3), vget_low_s16(_k0), 3);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r3), vget_low_s16(_k0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r4), vget_high_s16(_k0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r4), vget_high_s16(_k0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r5), vget_high_s16(_k0), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r5), vget_high_s16(_k0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r6), vget_high_s16(_k0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r6), vget_high_s16(_k0), 2);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r7), vget_high_s16(_k0), 3);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r7), vget_high_s16(_k0), 3);
kptr += 8;
r0 += 64;
}
_sum0 = vaddq_s32(_sum0, _sum2);
_sum1 = vaddq_s32(_sum1, _sum3);
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output0_tm + 4, _sum1);
output0_tm += 8;
}
#endif
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4);
#else
const short* r0 = bb2.row<const short>(i / 4);
#endif
const short* kptr = kernel0_tm.row<const short>(r);
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int q = 0; q < inch; q++)
{
int16x8_t _r0 = vld1q_s16(r0);
int16x8_t _r1 = vld1q_s16(r0 + 8);
int16x8_t _r2 = vld1q_s16(r0 + 16);
int16x8_t _r3 = vld1q_s16(r0 + 24);
int16x8_t _k0 = vld1q_s16(kptr);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r0), vget_low_s16(_k0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r1), vget_low_s16(_k0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r1), vget_low_s16(_k0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2), vget_high_s16(_k0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r2), vget_high_s16(_k0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r3), vget_high_s16(_k0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r3), vget_high_s16(_k0), 3);
kptr += 8;
r0 += 32;
}
int32x4_t _sum01 = vaddq_s32(_sum0, _sum1);
vst1q_s32(output0_tm, _sum01);
output0_tm += 4;
}
for (; i < tiles; i++)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4 + i % 4);
#else
const short* r0 = bb2.row<const short>(i / 4 + i % 4);
#endif
const short* kptr = kernel0_tm.row<const short>(r);
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int q = 0; q < inch; q++)
{
int16x8_t _r0 = vld1q_s16(r0);
int16x8_t _k0 = vld1q_s16(kptr);
_sum0 = vmlal_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0));
_sum1 = vmlal_s16(_sum1, vget_high_s16(_r0), vget_high_s16(_k0));
kptr += 8;
r0 += 8;
}
int32x4_t _sum = vaddq_s32(_sum0, _sum1);
#if __aarch64__
int sum = vaddvq_s32(_sum); // dot
#else
int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
_ss = vpadd_s32(_ss, _ss);
int sum = vget_lane_s32(_ss, 0);
#endif
output0_tm[0] = sum;
output0_tm++;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
int tmp[4][6];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);
const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1;
const int* output0_tm_1 = output0_tm_0 + tiles * 1;
const int* output0_tm_2 = output0_tm_0 + tiles * 2;
const int* output0_tm_3 = output0_tm_0 + tiles * 3;
const int* output0_tm_4 = output0_tm_0 + tiles * 4;
const int* output0_tm_5 = output0_tm_0 + tiles * 5;
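// Layout note (added for clarity): output0_tm_0..5 point at the six elements
// of one tile row; consecutive rows of top_blob_tm are `tiles` apart, so the
// `+= tiles * 6` steps below move down one row of the 6x6 transformed tile.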
int* output0 = out0.row<int>(i * 4) + j * 4;
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
// TODO neon optimize
for (int m = 0; m < 5; m++)
{
int tmp02a = output0_tm_1[0] + output0_tm_2[0];
int tmp13a = output0_tm_1[0] - output0_tm_2[0];
int tmp02b = output0_tm_3[0] + output0_tm_4[0];
int tmp13b = output0_tm_3[0] - output0_tm_4[0];
tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b;
tmp[1][m] = tmp13a + tmp13b * 2;
tmp[2][m] = tmp02a + tmp02b * 4;
tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
for (int m = 5; m < 6; m++)
{
int tmp02a = output0_tm_1[0] + output0_tm_2[0];
int tmp13a = output0_tm_1[0] - output0_tm_2[0];
int tmp02b = output0_tm_3[0] + output0_tm_4[0];
int tmp13b = output0_tm_3[0] - output0_tm_4[0];
tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4;
tmp[1][m] = (tmp13a + tmp13b * 2) * 4;
tmp[2][m] = (tmp02a + tmp02b * 4) * 4;
tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
for (int m = 0; m < 4; m++)
{
const int* tmp0 = tmp[m];
int tmp02a = tmp0[1] + tmp0[2];
int tmp13a = tmp0[1] - tmp0[2];
int tmp02b = tmp0[3] + tmp0[4];
int tmp13b = tmp0[3] - tmp0[4];
output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576;
output0[1] = (tmp13a + tmp13b * 2) / 576;
output0[2] = (tmp02a + tmp02b * 4) / 576;
output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
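// A minimal scalar sketch (reference only; the name and standalone form are
// illustrative, not part of this kernel) of the 1-D F(4,3) output transform
// spelled out row by row in the comments above. The kernel itself folds
// integer scale factors into these formulas (the *4 on the m==5 column and on
// r05, and the final /576 rescale).
static inline void winograd43_output_transform_1d_ref(const int r[6], int out[4])
{
int tmp02a = r[1] + r[2];
int tmp13a = r[1] - r[2];
int tmp02b = r[3] + r[4];
int tmp13b = r[3] - r[4];
out[0] = r[0] + tmp02a + tmp02b;
out[1] = tmp13a + tmp13b * 2;
out[2] = tmp02a + tmp02b * 4;
out[3] = r[5] + tmp13a + tmp13b * 8;
}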
|
nest_lock.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
int main()
{
//need to use an OpenMP construct so that OMPT will be initialized
#pragma omp parallel num_threads(1)
print_ids(0);
omp_nest_lock_t nest_lock;
printf("%" PRIu64 ": &nest_lock: %lli\n", ompt_get_thread_data()->value, (long long) &nest_lock);
omp_init_nest_lock(&nest_lock);
print_fuzzy_address(1);
omp_set_nest_lock(&nest_lock);
print_fuzzy_address(2);
omp_set_nest_lock(&nest_lock);
print_fuzzy_address(3);
omp_unset_nest_lock(&nest_lock);
print_fuzzy_address(4);
omp_unset_nest_lock(&nest_lock);
print_fuzzy_address(5);
omp_destroy_nest_lock(&nest_lock);
print_fuzzy_address(6);
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_init_nest_lock: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_first: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_next: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_prev: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_last: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_destroy_nest_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
findmax.c | #include<stdio.h>
#include<stdlib.h>
#include "generic.h"
#define size 10000
#define NT 8
int arr[size];
int flag[size];//flag[i] stays 1 iff arr[i] is a maximum
int main(int argc, char *argv[]){
if(argc<2){fprintf(stderr,"Usage: %s <integer-seed-value>\n",argv[0]);return 1;}
srand(atoi(argv[1]));//seed the RNG with the command-line integer value
//generate random numbers
for(int i=0;i<size;i++)arr[i]=rand()%1048576;
//initialize flag[i]=1 for 0<=i<size
for(int i=0;i<size;i++) flag[i]=1;
double t1=rtclock();
#pragma omp parallel for num_threads(NT)
for(int i=0;i<size;i++)
for(int j=0;j<size;j++)
//if arr[i] is not the maximum, set flag[i]=0
if(arr[i]<arr[j])flag[i]=0;
double t2=rtclock();
printf("\nTIME =%f \n",(t2-t1)*1000);
//print maximum element arr[i] for which flag[i] still 1.
for(int i=0;i<size;i++)if(flag[i]==1)printf("arr[%d]= %d\n",i,arr[i]);
}
/*Run executable-path <integer-seed-value>
*example: ./a.out 3 */
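/* A hedged alternative (added sketch, not part of the original program): the
 * same maximum can be found in O(n) with an OpenMP max reduction (OpenMP 3.1+)
 * instead of the O(n^2) flag method above. */
static int find_max_reduction(const int *a, int n){
int m=a[0];
#pragma omp parallel for reduction(max:m) num_threads(NT)
for(int i=1;i<n;i++) if(a[i]>m) m=a[i];
return m;
}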
|
DRB076-flush-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This benchmark is extracted from flush_nolist.1c of OpenMP
Application Programming Interface Examples Version 4.5.0.
We privatize variable i to fix data races in the original example.
Once i is privatized, flush is no longer needed.
*/
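/* For reference, a hedged sketch (paraphrased, not verbatim) of the racy
 * pattern this file fixes: with i shared, every thread writes it before
 * reading it back, and the original example relies on flush:
 *
 *   #pragma omp parallel reduction(+:sum) num_threads(10)
 *   {
 *     f1(&i);          // all threads write the shared i
 *     #pragma omp flush
 *     sum += i;        // may observe another thread's write
 *   }
 */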
#include "omprace.h"
#include <omp.h>
#include<stdio.h>
#include<assert.h>
void f1(int *q)
{
*q = 1;
}
int main()
{
omprace_init();
int i=0, sum=0;
#pragma omp parallel reduction(+:sum) num_threads(10) private(i)
{
f1(&i);
sum+= i;
}
assert (sum==10);
printf("sum=%d\n", sum);
omprace_fini();
return 0;
}
|
grid_ao_drv.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include "config.h"
#include "grid_ao_drv.h"
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define MAX(X,Y) ((X)>(Y)?(X):(Y))
double exp_cephes(double x);
double CINTcommon_fac_sp(int l);
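/*
 * A hedged reading of the recursion below: with fx0[i] holding the power
 * factor (x - R)^i of a Gaussian-times-polynomial, differentiation gives
 *   fx1[i] = i * fx0[i-1] - 2a * fx0[i+1],
 * which is the loop body with a2 = -2a (the SIMDD entries per power are the
 * grid points packed into a vector lane).
 */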
void GTOnabla1(double *fx1, double *fy1, double *fz1,
double *fx0, double *fy0, double *fz0, int l, double a)
{
int i, n;
double a2 = -2 * a;
for (n = 0; n < SIMDD; n++) {
fx1[n] = a2*fx0[SIMDD+n];
fy1[n] = a2*fy0[SIMDD+n];
fz1[n] = a2*fz0[SIMDD+n];
}
for (i = 1; i <= l; i++) {
for (n = 0; n < SIMDD; n++) {
fx1[i*SIMDD+n] = i*fx0[(i-1)*SIMDD+n] + a2*fx0[(i+1)*SIMDD+n];
fy1[i*SIMDD+n] = i*fy0[(i-1)*SIMDD+n] + a2*fy0[(i+1)*SIMDD+n];
fz1[i*SIMDD+n] = i*fz0[(i-1)*SIMDD+n] + a2*fz0[(i+1)*SIMDD+n];
} }
}
/*
* r - R_O = (r-R_i) + ri, ri = (x,y,z) = R_i - R_O
*/
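/*
 * A hedged reading: fx0[i] stores (x - R_i)^i on each grid point, and
 * multiplying by (x - R_O) = (x - R_i) + ri gives
 *   fx1[i] = ri * fx0[i] + fx0[i+1],
 * which is exactly the loop below.
 */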
void GTOx1(double *fx1, double *fy1, double *fz1,
double *fx0, double *fy0, double *fz0, int l, double *ri)
{
int i, n;
for (i = 0; i <= l; i++) {
for (n = 0; n < SIMDD; n++) {
fx1[i*SIMDD+n] = ri[0] * fx0[i*SIMDD+n] + fx0[(i+1)*SIMDD+n];
fy1[i*SIMDD+n] = ri[1] * fy0[i*SIMDD+n] + fy0[(i+1)*SIMDD+n];
fz1[i*SIMDD+n] = ri[2] * fz0[i*SIMDD+n] + fz0[(i+1)*SIMDD+n];
} }
}
int GTOprim_exp(double *eprim, double *coord, double *alpha, double *coeff,
int l, int nprim, int nctr, size_t ngrids, double fac)
{
int i, j;
double arr, maxc;
double logcoeff[nprim];
double rr[ngrids];
double *gridx = coord;
double *gridy = coord+BLKSIZE;
double *gridz = coord+BLKSIZE*2;
int not0 = 0;
// the maximum value of the coefficients for each pGTO
for (j = 0; j < nprim; j++) {
maxc = 0;
for (i = 0; i < nctr; i++) {
maxc = MAX(maxc, fabs(coeff[i*nprim+j]));
}
logcoeff[j] = log(maxc);
}
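// Screening note (added for clarity): the test below keeps a primitive when
// maxc * exp(-arr) >= exp(-EXPCUTOFF), i.e. when arr - log(maxc) < EXPCUTOFF.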
for (i = 0; i < ngrids; i++) {
rr[i] = gridx[i]*gridx[i] + gridy[i]*gridy[i] + gridz[i]*gridz[i];
}
for (j = 0; j < nprim; j++) {
for (i = 0; i < ngrids; i++) {
arr = alpha[j] * rr[i];
if (arr-logcoeff[j] < EXPCUTOFF) {
eprim[j*BLKSIZE+i] = exp_cephes(-arr) * fac;
not0 = 1;
} else {
eprim[j*BLKSIZE+i] = 0;
}
}
}
return not0;
}
// grid2atm[atm_id,xyz,grid_id]
static void _fill_grid2atm(double *grid2atm, double *coord, size_t bgrids, size_t ngrids,
int *atm, int natm, int *bas, int nbas, double *env)
{
int atm_id;
size_t ig;
double *r_atm;
for (atm_id = 0; atm_id < natm; atm_id++) {
r_atm = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
for (ig = 0; ig < bgrids; ig++) {
grid2atm[0*BLKSIZE+ig] = coord[0*ngrids+ig] - r_atm[0];
grid2atm[1*BLKSIZE+ig] = coord[1*ngrids+ig] - r_atm[1];
grid2atm[2*BLKSIZE+ig] = coord[2*ngrids+ig] - r_atm[2];
}
grid2atm += 3*BLKSIZE;
}
}
static void _dset0(double *out, size_t odim, size_t bgrids, int counts)
{
size_t i, j;
for (i = 0; i < counts; i++) {
for (j = 0; j < bgrids; j++) {
out[i*odim+j] = 0;
}
}
}
static void _zset0(double complex *out, size_t odim, size_t bgrids, int counts)
{
size_t i, j;
for (i = 0; i < counts; i++) {
for (j = 0; j < bgrids; j++) {
out[i*odim+j] = 0;
}
}
}
void GTOeval_sph_iter(FPtr_eval feval, FPtr_exp fexp, double fac,
size_t nao, size_t ngrids, size_t bgrids,
int param[], int *shls_slice, int *ao_loc, double *buf,
double *ao, double *coord, char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ncomp = param[TENSOR];
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF];
const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1;
const int atmcount = atmend - atmstart;
int i, k, l, np, nc, atm_id, bas_id, deg, dcart, ao_id;
size_t di;
double fac1;
double *p_exp, *pcoeff, *pcoord, *pcart, *ri, *pao;
double *grid2atm = buf; // [atm_id,xyz,grid]
double *eprim = grid2atm + atmcount*3*BLKSIZE;
double *cart_gto = eprim + NPRIMAX*BLKSIZE*2;
_fill_grid2atm(grid2atm, coord, bgrids, ngrids,
atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env);
for (bas_id = sh0; bas_id < sh1; bas_id++) {
np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
l = bas[bas_id*BAS_SLOTS+ANG_OF ];
deg = l * 2 + 1;
fac1 = fac * CINTcommon_fac_sp(l);
p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE;
ao_id = ao_loc[bas_id] - ao_loc[sh0];
if (non0table[bas_id] &&
(*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) {
dcart = (l+1)*(l+2)/2;
di = nc * dcart;
ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
if (l <= 1) { // s, p functions
(*feval)(ao+ao_id*ngrids, ri, eprim, pcoord, p_exp, pcoeff,
env, l, np, nc, nao, ngrids, bgrids);
} else {
(*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff,
env, l, np, nc, di, bgrids, bgrids);
pcart = cart_gto;
for (i = 0; i < ncomp; i++) {
pao = ao + (i*nao+ao_id)*ngrids;
for (k = 0; k < nc; k++) {
CINTc2s_ket_sph1(pao, pcart,
ngrids, bgrids, l);
pao += deg * ngrids;
pcart += dcart * bgrids;
}
}
}
} else {
for (i = 0; i < ncomp; i++) {
_dset0(ao+(i*nao+ao_id)*ngrids, ngrids, bgrids, nc*deg);
}
}
}
}
void GTOeval_cart_iter(FPtr_eval feval, FPtr_exp fexp, double fac,
size_t nao, size_t ngrids, size_t bgrids,
int param[], int *shls_slice, int *ao_loc, double *buf,
double *ao, double *coord, char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ncomp = param[TENSOR];
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF];
const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1;
const int atmcount = atmend - atmstart;
int i, l, np, nc, atm_id, bas_id, deg, ao_id;
double fac1;
double *p_exp, *pcoeff, *pcoord, *ri;
double *grid2atm = buf; // [atm_id,xyz,grid]
double *eprim = grid2atm + atmcount*3*BLKSIZE;
_fill_grid2atm(grid2atm, coord, bgrids, ngrids,
atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env);
for (bas_id = sh0; bas_id < sh1; bas_id++) {
np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
l = bas[bas_id*BAS_SLOTS+ANG_OF ];
deg = (l+1)*(l+2)/2;
fac1 = fac * CINTcommon_fac_sp(l);
p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE;
ao_id = ao_loc[bas_id] - ao_loc[sh0];
if (non0table[bas_id] &&
(*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) {
ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
(*feval)(ao+ao_id*ngrids, ri, eprim, pcoord, p_exp, pcoeff,
env, l, np, nc, nao, ngrids, bgrids);
} else {
for (i = 0; i < ncomp; i++) {
_dset0(ao+(i*nao+ao_id)*ngrids, ngrids, bgrids, nc*deg);
}
}
}
}
void GTOeval_spinor_iter(FPtr_eval feval, FPtr_exp fexp, void (*c2s)(), double fac,
size_t nao, size_t ngrids, size_t bgrids,
int param[], int *shls_slice, int *ao_loc, double *buf,
double complex *ao, double *coord, char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ncomp_e1 = param[POS_E1];
const int ncomp = param[TENSOR];
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF];
const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1;
const int atmcount = atmend - atmstart;
int i, l, np, nc, atm_id, bas_id, deg, kappa, dcart, ao_id;
size_t off, di;
double fac1;
double *p_exp, *pcoeff, *pcoord, *pcart, *ri;
double complex *aoa = ao;
double complex *aob = ao + ncomp*nao*ngrids;
double *grid2atm = buf; // [atm_id,xyz,grid]
double *eprim = grid2atm + atmcount*3*BLKSIZE;
double *cart_gto = eprim + NPRIMAX*BLKSIZE*2;
_fill_grid2atm(grid2atm, coord, bgrids, ngrids,
atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env);
for (bas_id = sh0; bas_id < sh1; bas_id++) {
np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
l = bas[bas_id*BAS_SLOTS+ANG_OF ];
deg = CINTlen_spinor(bas_id, bas);
fac1 = fac * CINTcommon_fac_sp(l);
p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE;
ao_id = ao_loc[bas_id] - ao_loc[sh0];
if (non0table[bas_id] &&
(*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) {
kappa = bas[bas_id*BAS_SLOTS+KAPPA_OF];
dcart = (l+1)*(l+2)/2;
di = nc * dcart;
ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
(*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff,
env, l, np, nc, di, bgrids, bgrids);
for (i = 0; i < ncomp; i++) {
pcart = cart_gto + i * di*bgrids*ncomp_e1;
off = (i*nao+ao_id)*ngrids;
(*c2s)(aoa+off, aob+off, pcart,
ngrids, bgrids, nc, kappa, l);
}
} else {
for (i = 0; i < ncomp; i++) {
off = (i*nao+ao_id)*ngrids;
_zset0(aoa+off, ngrids, bgrids, nc*deg);
_zset0(aob+off, ngrids, bgrids, nc*deg);
}
}
}
}
int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc, int *atm, int *bas)
{
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
int ish, nshblk, lastatm;
shloc[0] = sh0;
nshblk = 1;
lastatm = bas[BAS_SLOTS*sh0+ATOM_OF];
for (ish = sh0; ish < sh1; ish++) {
if (lastatm != bas[BAS_SLOTS*ish+ATOM_OF]) {
lastatm = bas[BAS_SLOTS*ish+ATOM_OF];
shloc[nshblk] = ish;
nshblk++;
}
}
shloc[nshblk] = sh1;
return nshblk;
}
/*
 * non0table[ngrids/blksize,nbas] is the T/F table for ao values to
* screen the ao evaluation for each shell
*/
void GTOeval_loop(void (*fiter)(), FPtr_eval feval, FPtr_exp fexp, double fac,
int ngrids, int param[], int *shls_slice, int *ao_loc,
double *ao, double *coord, char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int shloc[shls_slice[1]-shls_slice[0]+1];
const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
const size_t Ngrids = ngrids;
#pragma omp parallel
{
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
const size_t nao = ao_loc[sh1] - ao_loc[sh0];
int ip, ib, k, iloc, ish;
size_t aoff, bgrids;
int ncart = NCTR_CART * param[TENSOR] * param[POS_E1];
double *buf = malloc(sizeof(double) * BLKSIZE*(NPRIMAX*2+ncart));
#pragma omp for schedule(dynamic, 4)
for (k = 0; k < nblk*nshblk; k++) {
iloc = k / nblk;
ish = shloc[iloc];
aoff = ao_loc[ish] - ao_loc[sh0];
ib = k - iloc * nblk;
ip = ib * BLKSIZE;
bgrids = MIN(ngrids-ip, BLKSIZE);
(*fiter)(feval, fexp, fac, nao, Ngrids, bgrids,
param, shloc+iloc, ao_loc, buf, ao+aoff*Ngrids+ip,
coord+ip, non0table+ib*nbas,
atm, natm, bas, nbas, env);
}
free(buf);
}
}
void GTOeval_sph_drv(FPtr_eval feval, FPtr_exp fexp, double fac, int ngrids,
int param[], int *shls_slice, int *ao_loc,
double *ao, double *coord, char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
GTOeval_loop(GTOeval_sph_iter, feval, fexp, fac, ngrids,
param, shls_slice, ao_loc,
ao, coord, non0table, atm, natm, bas, nbas, env);
}
void GTOeval_cart_drv(FPtr_eval feval, FPtr_exp fexp, double fac, int ngrids,
int param[], int *shls_slice, int *ao_loc,
double *ao, double *coord, char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
GTOeval_loop(GTOeval_cart_iter, feval, fexp, fac, ngrids,
param, shls_slice, ao_loc,
ao, coord, non0table, atm, natm, bas, nbas, env);
}
void GTOeval_spinor_drv(FPtr_eval feval, FPtr_exp fexp, void (*c2s)(), double fac,
int ngrids, int param[], int *shls_slice, int *ao_loc,
double complex *ao, double *coord, char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int shloc[shls_slice[1]-shls_slice[0]+1];
const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
const size_t Ngrids = ngrids;
#pragma omp parallel
{
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
const size_t nao = ao_loc[sh1] - ao_loc[sh0];
int ip, ib, k, iloc, ish;
size_t aoff, bgrids;
int ncart = NCTR_CART * param[TENSOR] * param[POS_E1];
double *buf = malloc(sizeof(double) * BLKSIZE*(NPRIMAX*2+ncart));
#pragma omp for schedule(dynamic, 4)
for (k = 0; k < nblk*nshblk; k++) {
iloc = k / nblk;
ish = shloc[iloc];
aoff = ao_loc[ish] - ao_loc[sh0];
ib = k - iloc * nblk;
ip = ib * BLKSIZE;
bgrids = MIN(ngrids-ip, BLKSIZE);
GTOeval_spinor_iter(feval, fexp, c2s, fac,
nao, Ngrids, bgrids,
param, shloc+iloc, ao_loc, buf, ao+aoff*Ngrids+ip,
coord+ip, non0table+ib*nbas,
atm, natm, bas, nbas, env);
}
free(buf);
}
}
|
reciprocal_to_normal.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdlib.h>
#include <math.h>
#include "reciprocal_to_normal.h"
#include "lapack_wrapper.h"
#ifdef MEASURE_R2N
#include <unistd.h>
#include <time.h>
#endif
static lapack_complex_double fc3_sum_in_reciprocal_to_normal
(const long bi0,
const long bi1,
const long bi2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const lapack_complex_double *fc3_reciprocal,
const double *masses,
const long num_atom);
static double get_fc3_sum
(const long j,
const long k,
const long bi,
const double *freqs0,
const double *freqs1,
const double *freqs2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const lapack_complex_double *fc3_reciprocal,
const double *masses,
const long num_atom,
const double cutoff_frequency);
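/* A hedged summary of what the routines below compute: for the band triplet
 * (bi0, bi1, bi2),
 *   |Phi|^2 / (w0 w1 w2),  with
 *   Phi = sum_{l,m,n} sum_{i,j,k} e0_{li} e1_{mj} e2_{nk}
 *         * fc3_reciprocal_{lmn,ijk} / sqrt(M_l M_m M_n),
 * as read from get_fc3_sum and fc3_sum_in_reciprocal_to_normal. */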
void reciprocal_to_normal_squared
(double *fc3_normal_squared,
const long (*g_pos)[4],
const long num_g_pos,
const lapack_complex_double *fc3_reciprocal,
const double *freqs0,
const double *freqs1,
const double *freqs2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const double *masses,
const long *band_indices,
const long num_band0,
const long num_band,
const double cutoff_frequency,
const long openmp_at_bands)
{
long i, num_atom;
#ifdef MEASURE_R2N
double loopTotalCPUTime, loopTotalWallTime;
time_t loopStartWallTime;
clock_t loopStartCPUTime;
#endif
num_atom = num_band / 3;
#ifdef MEASURE_R2N
loopStartWallTime = time(NULL);
loopStartCPUTime = clock();
#endif
#pragma omp parallel for if (openmp_at_bands)
for (i = 0; i < num_g_pos; i++) {
if (freqs0[band_indices[g_pos[i][0]]] > cutoff_frequency) {
fc3_normal_squared[g_pos[i][3]] = get_fc3_sum(g_pos[i][1],
g_pos[i][2],
band_indices[g_pos[i][0]],
freqs0,
freqs1,
freqs2,
eigvecs0,
eigvecs1,
eigvecs2,
fc3_reciprocal,
masses,
num_atom,
cutoff_frequency);
}
}
#ifdef MEASURE_R2N
loopTotalCPUTime = (double)(clock() - loopStartCPUTime) / CLOCKS_PER_SEC;
loopTotalWallTime = difftime(time(NULL), loopStartWallTime);
printf(" %1.3fs (%1.3fs CPU)\n", loopTotalWallTime, loopTotalCPUTime);
#endif
}
static double get_fc3_sum
(const long j,
const long k,
const long bi,
const double *freqs0,
const double *freqs1,
const double *freqs2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const lapack_complex_double *fc3_reciprocal,
const double *masses,
const long num_atom,
const double cutoff_frequency)
{
double fff, sum_real, sum_imag;
lapack_complex_double fc3_sum;
if (freqs1[j] > cutoff_frequency && freqs2[k] > cutoff_frequency) {
fff = freqs0[bi] * freqs1[j] * freqs2[k];
fc3_sum = fc3_sum_in_reciprocal_to_normal
(bi, j, k,
eigvecs0, eigvecs1, eigvecs2,
fc3_reciprocal,
masses,
num_atom);
sum_real = lapack_complex_double_real(fc3_sum);
sum_imag = lapack_complex_double_imag(fc3_sum);
return (sum_real * sum_real + sum_imag * sum_imag) / fff;
} else {
return 0;
}
}
static lapack_complex_double fc3_sum_in_reciprocal_to_normal
(const long bi0,
const long bi1,
const long bi2,
const lapack_complex_double *eigvecs0,
const lapack_complex_double *eigvecs1,
const lapack_complex_double *eigvecs2,
const lapack_complex_double *fc3_reciprocal,
const double *masses,
const long num_atom)
{
long baseIndex, index_l, index_lm, i, j, k, l, m, n;
double sum_real, sum_imag, mmm, mass_l, mass_lm;
lapack_complex_double eig_prod, eig_prod1;
sum_real = 0;
sum_imag = 0;
for (l = 0; l < num_atom; l++) {
mass_l = masses[l];
index_l = l * num_atom * num_atom * 27;
for (m = 0; m < num_atom; m++) {
mass_lm = mass_l * masses[m];
index_lm = index_l + m * num_atom * 27;
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
eig_prod1 = phonoc_complex_prod
(eigvecs0[(l * 3 + i) * num_atom * 3 + bi0],
eigvecs1[(m * 3 + j) * num_atom * 3 + bi1]);
for (n = 0; n < num_atom; n++) {
mmm = 1.0 / sqrt(mass_lm * masses[n]);
baseIndex = index_lm + n * 27 + i * 9 + j * 3;
for (k = 0; k < 3; k++) {
eig_prod = phonoc_complex_prod
(eig_prod1, eigvecs2[(n * 3 + k) * num_atom * 3 + bi2]);
eig_prod = phonoc_complex_prod
(eig_prod, fc3_reciprocal[baseIndex + k]);
sum_real += lapack_complex_double_real(eig_prod) * mmm;
sum_imag += lapack_complex_double_imag(eig_prod) * mmm;
}
}
}
}
}
}
return lapack_make_complex_double(sum_real, sum_imag);
}
|
GB_binop__bclr_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int64)
// C=scalar+B GB (_bind1st__bclr_int64)
// C=scalar+B' GB (_bind1st_tran__bclr_int64)
// C=A+scalar GB (_bind2nd__bclr_int64)
// C=A'+scalar GB (_bind2nd_tran__bclr_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = GB_BITCLR (aij, bij, int64_t, 64)
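// A hedged expansion (assuming the GxB_BCLR convention of 1-based bit
// positions, with out-of-range positions leaving aij unchanged):
//   GB_BITCLR (x, k, int64_t, 64)  ~  (k < 1 || k > 64) ? x : x & ~(1LL << (k-1))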
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, int64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT64 || GxB_NO_BCLR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bclr_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bclr_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bclr_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bclr_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bclr_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITCLR (x, bij, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bclr_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITCLR (aij, y, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, int64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bclr_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, int64_t, 64) ; \
}
GrB_Info GB (_bind2nd_tran__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Synchronization.h | //===- Synchronization.h - OpenMP synchronization utilities ------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_DEVICERTL_SYNCHRONIZATION_H
#define OMPTARGET_DEVICERTL_SYNCHRONIZATION_H
#include "Types.h"
namespace _OMP {
namespace synchronize {
/// Initialize the synchronization machinery. Must be called by all threads.
void init(bool IsSPMD);
/// Synchronize all threads in a warp identified by \p Mask.
void warp(LaneMaskTy Mask);
/// Synchronize all threads in a block.
void threads();
/// Synchronizing threads is allowed even if they all hit different instances of
/// `synchronize::threads()`. However, `synchronize::threadsAligned()` is more
/// restrictive in that it requires all threads to hit the same instance. The
/// noinline is removed by the openmp-opt pass and helps to preserve the
/// information till then.
///{
#pragma omp begin assumes ext_aligned_barrier
/// Synchronize all threads in a block; they are all reaching the same
/// instruction (hence all threads in the block are "aligned").
__attribute__((noinline)) void threadsAligned();
#pragma omp end assumes
///}
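/// An illustration (hedged, not part of this interface): both branches of
///
///   if (Cond) synchronize::threads(); else synchronize::threads();
///
/// pair up correctly because threads() tolerates distinct call sites, whereas
/// threadsAligned() would require every thread to reach the single same call.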
} // namespace synchronize
namespace fence {
/// Memory fence with \p Ordering semantics for the team.
void team(int Ordering);
/// Memory fence with \p Ordering semantics for the contention group.
void kernel(int Ordering);
/// Memory fence with \p Ordering semantics for the system.
void system(int Ordering);
} // namespace fence
namespace atomic {
/// Atomically load \p Addr with \p Ordering semantics.
uint32_t load(uint32_t *Addr, int Ordering);
/// Atomically store \p V to \p Addr with \p Ordering semantics.
void store(uint32_t *Addr, uint32_t V, int Ordering);
/// Atomically increment \p *Addr and wrap at \p V with \p Ordering semantics.
uint32_t inc(uint32_t *Addr, uint32_t V, int Ordering);
/// Atomically add \p V to \p *Addr with \p Ordering semantics.
uint32_t add(uint32_t *Addr, uint32_t V, int Ordering);
/// Atomically add \p V to \p *Addr with \p Ordering semantics.
uint64_t add(uint64_t *Addr, uint64_t V, int Ordering);
} // namespace atomic
} // namespace _OMP
#endif
|
GB_binop__isne_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint64)
// A*D function (colscale): GB (_AxD__isne_uint64)
// D*A function (rowscale): GB (_DxB__isne_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint64)
// C=scalar+B GB (_bind1st__isne_uint64)
// C=scalar+B' GB (_bind1st_tran__isne_uint64)
// C=A+scalar GB (_bind2nd__isne_uint64)
// C=A'+scalar GB (_bind2nd_tran__isne_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT64 || GxB_NO_ISNE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
distribute_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify=expected,omp50 %s -Wuninitialized
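// NOTE: the expected-error/expected-warning annotations in this file drive
// clang's -verify mode; an @+N suffix anchors the diagnostic N lines below
// the comment, so the relative line offsets here are load-bearing.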
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp distribute parallel for simd
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd foo
void test_no_clause(void) {
int i;
#pragma omp distribute parallel for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}}
#pragma omp distribute parallel for simd
++i;
}
void test_branch_protected_scope(void) {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers(void) {
int i, x;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd;
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd firstprivate(x);
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo(void);
void test_safelen(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd safelen()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_simdlen(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd simdlen()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_safelen_simdlen(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
void test_collapse(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{integer constant expression}}
#pragma omp distribute parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(2)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp distribute parallel for simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_linear(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd linear(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd linear(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd linear(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_aligned(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd aligned(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(z)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp distribute parallel for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
void test_private(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate(void) {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 2 {{lastprivate variable cannot be firstprivate}} expected-note@+3 2 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages(void) {
float a[100], b[100], c[100];
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
void test_nontemporal(void) {
int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(,
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal(, )
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal()
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal(int)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd nontemporal(0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd nontemporal(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd nontemporal(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd nontemporal(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(x :)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp distribute parallel for simd nontemporal(x :, )
for (i = 0; i < 16; ++i)
;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp distribute parallel for simd nontemporal(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd private(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd nontemporal(x) private(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp distribute parallel for simd nontemporal(x, y : 0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd nontemporal(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd lastprivate(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
#pragma omp distribute parallel for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected '(' after 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp distribute parallel for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp distribute parallel for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp distribute parallel for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
for (int i = 0; i < 10; ++i)
;
#pragma omp distribute parallel for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}}
for (int i = 0; i < 10; ++i)
;
}
|
mem.c | /* Copyright 2013-2015. The Regents of the University of California.
* Copyright 2016. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
*
*/
#include <stdbool.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc/misc.h"
#include "misc/debug.h"
#include "mem.h"
bool memcache = true;
void memcache_off(void)
{
memcache = false;
}
struct mem_s {
const void* ptr;
size_t len;
bool device;
bool free;
int device_id;
int thread_id;
struct mem_s* next;
};
static struct mem_s* mem_list = NULL;
static bool inside_p(const struct mem_s* rptr, const void* ptr)
{
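// note: the range check below does arithmetic on a const void*, which
// relies on the GCC extension that treats sizeof(void) as 1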
return (ptr >= rptr->ptr) && (ptr < rptr->ptr + rptr->len);
}
static struct mem_s* search(const void* ptr, bool remove)
{
struct mem_s* rptr = NULL;
#pragma omp critical
{
struct mem_s** nptr = &mem_list;
while (true) {
rptr = *nptr;
if (NULL == rptr)
break;
if (inside_p(rptr, ptr)) {
if (remove)
*nptr = rptr->next;
break;
}
nptr = &(rptr->next);
}
}
return rptr;
}
static bool free_check_p(const struct mem_s* rptr, size_t size, int dev, int tid)
{
return (rptr->free && (rptr->device_id == dev) && (rptr->len >= size)
&& ((-1 == tid) || (rptr->thread_id == tid)));
}
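// "_unsafe": the caller must already hold the mem_list critical section,
// as find_free() and memcache_clear() do.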
static struct mem_s** find_free_unsafe(size_t size, int dev, int tid)
{
struct mem_s* rptr = NULL;
struct mem_s** nptr = &mem_list;
while (true) {
rptr = *nptr;
if (NULL == rptr)
break;
if (free_check_p(rptr, size, dev, tid))
break;
nptr = &(rptr->next);
}
return nptr;
}
static struct mem_s* find_free(size_t size, int dev)
{
struct mem_s* rptr = NULL;
#pragma omp critical
{
rptr = *find_free_unsafe(size, dev, -1);
if (NULL != rptr)
rptr->free = false;
}
return rptr;
}
static void insert(const void* ptr, size_t len, bool device, int dev)
{
PTR_ALLOC(struct mem_s, nptr);
nptr->ptr = ptr;
nptr->len = len;
nptr->device = device;
nptr->device_id = dev;
#ifdef _OPENMP
nptr->thread_id = omp_get_thread_num();
#else
nptr->thread_id = -1;
#endif
nptr->free = false;
#pragma omp critical
{
nptr->next = mem_list;
mem_list = PTR_PASS(nptr);
}
}
void memcache_clear(int dev, void (*device_free)(const void*x))
{
struct mem_s* nptr = NULL;
if (!memcache)
return;
do {
#pragma omp critical
{
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = -1;
#endif
struct mem_s** rptr = find_free_unsafe(0, dev, tid);
nptr = *rptr;
// remove from list
if (NULL != nptr)
*rptr = nptr->next;
}
if (NULL != nptr) {
assert(nptr->device);
debug_printf(DP_DEBUG3, "Freeing %ld bytes. (DID: %d TID: %d)\n\n",
nptr->len, nptr->device_id, nptr->thread_id);
device_free(nptr->ptr);
free(nptr);
}
} while (NULL != nptr);
}
bool mem_ondevice(const void* ptr)
{
if (NULL == ptr)
return false;
struct mem_s* p = search(ptr, false);
bool r = ((NULL != p) && p->device);
return r;
}
bool mem_device_accessible(const void* ptr)
{
struct mem_s* p = search(ptr, false);
return (NULL != p);
}
void mem_device_free(void* ptr, void (*device_free)(const void* ptr))
{
struct mem_s* nptr = search(ptr, !memcache);
assert(NULL != nptr);
assert(nptr->ptr == ptr);
assert(nptr->device);
if (memcache) {
assert(!nptr->free);
nptr->free = true;
} else {
device_free(ptr);
free(nptr);
}
}
void* mem_device_malloc(int device, long size, void* (*device_alloc)(size_t))
{
if (memcache) {
struct mem_s* nptr = find_free(size, device);
if (NULL != nptr) {
assert(nptr->device);
assert(!nptr->free);
#ifdef _OPENMP
nptr->thread_id = omp_get_thread_num();
#else
nptr->thread_id = -1;
#endif
return (void*)(nptr->ptr);
}
}
void* ptr = device_alloc(size);
insert(ptr, size, true, device);
return ptr;
}
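/*
 * Usage sketch (editorial illustration; my_dev_alloc / my_dev_free stand
 * for hypothetical device allocator callbacks, not part of this file):
 *
 *     void* p = mem_device_malloc(0, 4096, my_dev_alloc);
 *     // ... use p on the device ...
 *     mem_device_free(p, my_dev_free);   // memcache on: cached, not freed
 *     memcache_clear(0, my_dev_free);    // now actually released
 */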
|
calc_residual.h | #pragma omp target teams num_teams((NUM*NUM/2)/BLOCK_SIZE) thread_limit(BLOCK_SIZE)
{
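// Computes the squared residual of the red/black pressure solve: each team
// reduces BLOCK_SIZE red/black point pairs and stores its partial sum of
// squared residuals in res_arr[tid] (names as defined by the surrounding
// solver).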
Real sum_cache[BLOCK_SIZE];
#pragma omp parallel
{
int lid = omp_get_thread_num();
int tid = omp_get_team_num();
int gid = tid * BLOCK_SIZE + lid;
int row = (gid % (NUM/2)) + 1;
int col = (gid / (NUM/2)) + 1;
int NUM_2 = NUM >> 1;
Real p_ij, p_im1j, p_ip1j, p_ijm1, p_ijp1, rhs, res, res2;
// red point
p_ij = pres_red(col, row);
p_im1j = pres_black(col - 1, row);
p_ip1j = pres_black(col + 1, row);
p_ijm1 = pres_black(col, row - (col & 1));
p_ijp1 = pres_black(col, row + ((col + 1) & 1));
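// right-hand side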
rhs = (((F(col, (2 * row) - (col & 1)) - F(col - 1, (2 * row) - (col & 1))) / dx)
+ ((G(col, (2 * row) - (col & 1)) - G(col, (2 * row) - (col & 1) - 1)) / dy)) / dt;
// calculate residual
res = ((p_ip1j - (TWO * p_ij) + p_im1j) / (dx * dx))
+ ((p_ijp1 - (TWO * p_ij) + p_ijm1) / (dy * dy)) - rhs;
// black point
p_ij = pres_black(col, row);
p_im1j = pres_red(col - 1, row);
p_ip1j = pres_red(col + 1, row);
p_ijm1 = pres_red(col, row - ((col + 1) & 1));
p_ijp1 = pres_red(col, row + (col & 1));
// right-hand side
rhs = (((F(col, (2 * row) - ((col + 1) & 1)) - F(col - 1, (2 * row) - ((col + 1) & 1))) / dx)
+ ((G(col, (2 * row) - ((col + 1) & 1)) - G(col, (2 * row) - ((col + 1) & 1) - 1)) / dy)) / dt;
// calculate residual
res2 = ((p_ip1j - (TWO * p_ij) + p_im1j) / (dx * dx))
+ ((p_ijp1 - (TWO * p_ij) + p_ijm1) / (dy * dy)) - rhs;
sum_cache[lid] = (res * res) + (res2 * res2);
// synchronize threads in block to ensure all residuals stored
#pragma omp barrier
// add up squared residuals for block
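// (tree reduction: halving the stride assumes BLOCK_SIZE is a power of two)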
int i = BLOCK_SIZE >> 1;
while (i != 0) {
if (lid < i) {
sum_cache[lid] += sum_cache[lid + i];
}
#pragma omp barrier
i >>= 1;
}
// store block's summed residuals
if (lid == 0) {
res_arr[tid] = sum_cache[0];
}
}
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (e.g. X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% or MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
% o exception: return any errors or warnings in this structure.
%
*/
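/*
  Usage sketch (editorial illustration, not MagickCore code): create a
  256-entry linear-gray colormap and propagate an allocation failure.

    if (AcquireImageColormap(image,256,exception) == MagickFalse)
      return(MagickFalse);
*/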
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
const size_t colors,ExceptionInfo *exception)
{
register ssize_t
i;
/*
Allocate image colormap.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->colors=MagickMax(colors,1);
if (image->colormap == (PixelInfo *) NULL)
image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
sizeof(*image->colormap));
else
image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
image->colors+1,sizeof(*image->colormap));
if (image->colormap == (PixelInfo *) NULL)
{
image->colors=0;
image->storage_class=DirectClass;
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
for (i=0; i < (ssize_t) image->colors; i++)
{
double
pixel;
GetPixelInfo(image,image->colormap+i);
pixel=(double) (i*(QuantumRange/MagickMax(colors-1,1)));
image->colormap[i].red=pixel;
image->colormap[i].green=pixel;
image->colormap[i].blue=pixel;
image->colormap[i].alpha=(MagickRealType) OpaqueAlpha;
image->colormap[i].alpha_trait=BlendPixelTrait;
}
return(SetImageStorageClass(image,PseudoClass,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormapImage() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychedelic effect.
%
% WARNING: this assumes an image's colormap is in a well-known and defined
% order. Currently ImageMagick has no way of setting that order.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
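/*
  Usage sketch (editorial illustration): displacing the colormap by one
  position per frame cycles the palette.

    (void) CycleColormapImage(image,1,exception);
*/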
MagickExport MagickBooleanType CycleColormapImage(Image *image,
const ssize_t displace,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == DirectClass)
(void) SetImageType(image,PaletteType,exception);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
ssize_t
index;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(ssize_t) (GetPixelIndex(image,q)+displace) % image->colors;
if (index < 0)
index+=(ssize_t) image->colors;
SetPixelIndex(image,(Quantum) index,q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const PixelInfo
*color_1,
*color_2;
int
intensity;
color_1=(const PixelInfo *) x;
color_2=(const PixelInfo *) y;
intensity=(int) GetPixelInfoIntensity((const Image *) NULL,color_2)-(int)
GetPixelInfoIntensity((const Image *) NULL,color_1);
return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
y;
unsigned short
*pixels;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->storage_class != PseudoClass)
return(MagickTrue);
/*
Allocate memory for pixel indexes.
*/
pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
sizeof(*pixels));
if (pixels == (unsigned short *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Stash each colormap entry's original index in its alpha channel so the
permutation can be inverted after sorting.
*/
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
/*
Sort image colormap by decreasing color intensity.
*/
qsort((void *) image->colormap,(size_t) image->colors,
sizeof(*image->colormap),IntensityCompare);
/*
Update image colormap indexes to sorted colormap order.
*/
for (i=0; i < (ssize_t) image->colors; i++)
pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
index;
register ssize_t
x;
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)];
SetPixelIndex(image,index,q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (status == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
pixels=(unsigned short *) RelinquishMagickMemory(pixels);
return(status);
}
|
GB_binop__gt_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_fp32)
// A*D function (colscale): GB (_AxD__gt_fp32)
// D*A function (rowscale): GB (_DxB__gt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_fp32)
// C=scalar+B GB (_bind1st__gt_fp32)
// C=scalar+B' GB (_bind1st_tran__gt_fp32)
// C=A+scalar GB (_bind2nd__gt_fp32)
// C=A'+scalar GB (_bind2nd_tran__gt_fp32)
// C type: bool
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij > bij)
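// Note: in the kernel list above, "+" and ".*" are generic placeholders for
// the eWiseAdd/eWiseMult structure of each method; the operator actually
// applied by every kernel in this file is the comparison cij = (aij > bij).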
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_FP32 || GxB_NO_GT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__gt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fill.c |
/* Fill the nonzero term of the A matrix */
#include <petscmat.h>
#include <petscvec.h>
#include <petscsnes.h>
#include <../src/mat/impls/baij/seq/baij.h>
#include <omp.h>
#include <ktime.h>
#include <geometry.h>
#ifdef __USE_HW_COUNTER
#include <perf.h>
#include <kperf.h>
#endif
#include <phy.h>
#include <kernel.h>
int
fill_mat(struct fill *restrict fill)
{
#ifdef __USE_HW_COUNTER
const struct fd fd = fill->perf_counters->fd;
struct counters start;
perf_read(fd, &start);
const uint64_t icycle = __rdtsc();
#endif
struct ktime ktime;
setktime(&ktime);
const double *restrict q = fill->q;
const struct geometry *restrict g = fill->g;
const struct ivals *restrict iv = fill->iv;
const struct ts *restrict ts = fill->ts;
Mat A = fill->A;
int ierr;
uint32_t i;
Mat_SeqBAIJ *restrict a = (Mat_SeqBAIJ *) A->data;
size_t sz = (a->bs2 * a->i[a->mbs]);
__assume_aligned(a->a, 64);
memset(a->a, 0, sz * sizeof(double));
size_t nnodes = g->n->sz;
size_t bsz = g->c->bsz;
double cfl = ts->cfl;
double *restrict area = g->n->area;
double *restrict cdt = ts->cdt;
/*
Loop over the nodes, computing the local row and column index of each
diagonal entry and inserting the values with the PETSc MatSetValues() routine
*/
#pragma omp parallel for
for(i = 0; i < nnodes; i++)
{
double tmp = area[i] / (cfl * cdt[i]);
uint32_t j;
for(j = 0; j < bsz; j++)
{
uint32_t idx = j + bsz * i;
/*
Inserts or adds values into certain locations of a matrix,
using a local ordering of the nodes
*/
MatSetValues(A, 1, (const int *) &idx, 1, (const int *) &idx,
(const double *) &tmp, ADD_VALUES);
}
}
struct edge *restrict eptr = g->e->eptr;
struct xyzn *restrict xyzn = g->e->xyzn;
uint32_t *restrict ie = g->s->ie;
uint32_t *restrict part = g->s->part;
#pragma omp parallel
{
uint32_t t = omp_get_thread_num();
uint32_t ie0 = ie[t];
uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
uint32_t n0 = eptr->n0[i];
uint32_t n1 = eptr->n1[i];
double xn = xyzn->x0[i];
double yn = xyzn->x1[i];
double zn = xyzn->x2[i];
double ln = xyzn->x3[i];
/*
Now let's get our other two vectors.
For the first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product with
{1,0,0} is close to unity, use {0,1,0} instead
*/
double dot = xn;
double X1, Y1, Z1;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = - dot * yn;
Z1 = - dot * zn;
}
else
{
dot = yn;
X1 = - dot * xn;
Y1 = 1.f - dot * yn;
Z1 = - dot * zn;
}
/* Normalize the first vector */
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/* Take cross-product of normal and V1 to get V2 */
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/* Variables on left */
// Velocity u
double uL = q[bsz * n0 + 1];
// Velocity v
double vL = q[bsz * n0 + 2];
// Velocity w
double wL = q[bsz * n0 + 3];
double ubarL = xn * uL;
ubarL += yn * vL;
ubarL += zn * wL;
/* Variables on right */
// Velocity u
double uR = q[bsz * n1 + 1];
// Velocity v
double vR = q[bsz * n1 + 2];
// Velocity w
double wR = q[bsz * n1 + 3];
double ubarR = xn * uR;
ubarR += yn * vR;
ubarR += zn * wR;
/*
Now compute eigenvalues and |A| from averaged variables
Average the variables first
*/
double u = 0.5f * (uL + uR);
double v = 0.5f * (vL + vR);
double w = 0.5f * (wL + wR);
double ubar = xn * u;
ubar += yn * v;
ubar += zn * w;
double c2 = ubar * ubar + BETA;
double c = sqrt(c2);
/* Put in the eigenvalue smoothing stuff */
double eig1 = ln * fabs(ubar);
double eig2 = ln * fabs(ubar);
double eig3 = ln * fabs(ubar + c);
double eig4 = ln * fabs(ubar - c);
double phi1 = xn * BETA;
phi1 += u * ubar;
double phi2 = yn * BETA;
phi2 += v * ubar;
double phi3 = zn * BETA;
phi3 += w * ubar;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
/* Components of T(inverse) (call this y) */
double c2inv = 1.f / c2;
double y11 = u * phi4;
y11 += v * phi5;
y11 += w * phi6;
y11 = -c2inv * y11 / BETA;
double y21 = u * phi7;
y21 += v * phi8;
y21 += w * phi9;
y21 = -c2inv * y21 / BETA;
double y31 = c2inv * (c - ubar);
y31 = 0.5f * y31 / BETA;
double y41 = c2inv * (c + ubar);
y41 = -0.5f * y41 / BETA;
double y12 = c2inv * phi4;
double y22 = c2inv * phi7;
double y32 = c2inv * 0.5f * xn;
double y42 = c2inv * 0.5f * xn;
double y13 = c2inv * phi5;
double y23 = c2inv * phi8;
double y33 = c2inv * 0.5f * yn;
double y43 = c2inv * 0.5f * yn;
double y14 = c2inv * phi6;
double y24 = c2inv * phi9;
double y34 = c2inv * 0.5f * zn;
double y44 = c2inv * 0.5f * zn;
/* Now get elements of T */
double t13 = c * BETA;
double t23 = u * (ubar + c);
t23 += xn * BETA;
double t33 = v * (ubar + c);
t33 += yn * BETA;
double t43 = w * (ubar + c);
t43 += zn * BETA;
double t14 = -c * BETA;
double t24 = u * (ubar - c);
t24 += xn * BETA;
double t34 = v * (ubar - c);
t34 += yn * BETA;
double t44 = w * (ubar - c);
t44 += zn * BETA;
/* Compute T * |lambda| * T(inv) */
double a11 = eig3 * t13 * y31;
a11 += eig4 * t14 * y41;
double a12 = eig3 * t13 * y32;
a12 += eig4 * t14 * y42;
double a13 = eig3 * t13 * y33;
a13 += eig4 * t14 * y43;
double a14 = eig3 * t13 * y34;
a14 += eig4 * t14 * y44;
double a21 = eig1 * X1 * y11;
a21 += eig2 * X2 * y21;
a21 += eig3 * t23 * y31;
a21 += eig4 * t24 * y41;
double a22 = eig1 * X1 * y12;
a22 += eig2 * X2 * y22;
a22 += eig3 * t23 * y32;
a22 += eig4 * t24 * y42;
double a23 = eig1 * X1 * y13;
a23 += eig2 * X2 * y23;
a23 += eig3 * t23 * y33;
a23 += eig4 * t24 * y43;
double a24 = eig1 * X1 * y14;
a24 += eig2 * X2 * y24;
a24 += eig3 * t23 * y34;
a24 += eig4 * t24 * y44;
double a31 = eig1 * Y1 * y11;
a31 += eig2 * Y2 * y21;
a31 += eig3 * t33 * y31;
a31 += eig4 * t34 * y41;
double a32 = eig1 * Y1 * y12;
a32 += eig2 * Y2 * y22;
a32 += eig3 * t33 * y32;
a32 += eig4 * t34 * y42;
double a33 = eig1 * Y1 * y13;
a33 += eig2 * Y2 * y23;
a33 += eig3 * t33 * y33;
a33 += eig4 * t34 * y43;
double a34 = eig1 * Y1* y14;
a34 += eig2 * Y2 * y24;
a34 += eig3 * t33 * y34;
a34 += eig4 * t34 * y44;
double a41 = eig1 * Z1 * y11;
a41 += eig2 * Z2 * y21;
a41 += eig3 * t43 * y31;
a41 += eig4 * t44 * y41;
double a42 = eig1 * Z1 * y12;
a42 += eig2 * Z2 * y22;
a42 += eig3 * t43 * y32;
a42 += eig4 * t44 * y42;
double a43 = eig1 * Z1 * y13;
a43 += eig2 * Z2 * y23;
a43 += eig3 * t43 * y33;
a43 += eig4 * t44 * y43;
double a44 = eig1 * Z1 * y14;
a44 += eig2 * Z2 * y24;
a44 += eig3 * t43 * y34;
a44 += eig4 * t44 * y44;
/* Regular Jacobians on left: Form 0.5 * (A + |A|) */
double lb = ln * BETA;
double lx = ln * xn;
double ly = ln * yn;
double lz = ln * zn;
double val[4][8];
val[0][0] = 0.5f * a11;
val[0][1] = 0.5f * ((lb * xn) + a12);
val[0][2] = 0.5f * ((lb * yn) + a13);
val[0][3] = 0.5f * ((lb * zn) + a14);
val[1][0] = 0.5f * (lx + a21);
val[1][1] = 0.5f * ((ln * (ubarL + xn * uL)) + a22);
val[1][2] = 0.5f * ((ly * uL) + a23);
val[1][3] = 0.5f * ((lz * uL) + a24);
val[2][0] = 0.5f * (ly + a31);
val[2][1] = 0.5f * ((lx * vL) + a32);
val[2][2] = 0.5f * ((ln * (ubarL + yn * vL)) + a33);
val[2][3] = 0.5f * ((lz * vL) + a34);
val[3][0] = 0.5f * (lz + a41);
val[3][1] = 0.5f * ((lx * wL) + a42);
val[3][2] = 0.5f * ((ly * wL) + a43);
val[3][3] = 0.5f * ((ln * (ubarL + zn * wL)) + a44);
/* Regular Jacobians on right: Form 0.5 * (A - |A|) */
val[0][4] = 0.5f * -a11;
val[0][5] = 0.5f * ((lb * xn) - a12);
val[0][6] = 0.5f * ((lb * yn) - a13);
val[0][7] = 0.5f * ((lb * zn) - a14);
val[1][4] = 0.5f * (lx - a21);
val[1][5] = 0.5f * ((ln * (ubarR + xn * uR)) - a22);
val[1][6] = 0.5f * ((ly * uR) - a23);
val[1][7] = 0.5f * ((lz * uR) - a24);
val[2][4] = 0.5f * (ly - a31);
val[2][5] = 0.5f * ((lx * vR) - a32);
val[2][6] = 0.5f * ((ln * (ubarR + yn * vR)) - a33);
val[2][7] = 0.5f * ((lz * vR) - a34);
val[3][4] = 0.5f * (lz - a41);
val[3][5] = 0.5f * ((lx * wR) - a42);
val[3][6] = 0.5f * ((ly * wR) - a43);
val[3][7] = 0.5f * ((ln * (ubarR + zn * wR)) - a44);
uint32_t idxn[2];
idxn[0] = n0;
idxn[1] = n1;
if(part[n0] == t)
{
MatSetValuesBlocked(A, 1, (const int *) &n0, 2, (const int *) idxn,
(const double *) val, ADD_VALUES);
}
if(part[n1] == t)
{
/*
Exchange elements in place
*/
uint32_t j;
for(j = 0; j < bsz; j++)
{
uint32_t k;
for(k = 0; k < 8; k++) val[j][k] = -val[j][k];
}
MatSetValuesBlocked(A, 1, (const int *) &n1, 2, (const int *) idxn,
(const double *) val, ADD_VALUES);
}
}
}
size_t nsnodes = g->b->s->n->sz;
uint32_t *restrict nsptr = g->b->s->n->nptr;
struct xyz *restrict s_xyz = g->b->s->n->xyz;
/* Solid boundary points */
#pragma omp parallel for
for(i = 0; i < nsnodes; i++)
{
uint32_t n = nsptr[i];
double v[3];
v[0] = s_xyz->x0[i];
v[1] = s_xyz->x1[i];
v[2] = s_xyz->x2[i];
uint32_t idxm[3];
idxm[0] = bsz * n + 1;
idxm[1] = bsz * n + 2;
idxm[2] = bsz * n + 3;
uint32_t idxn = bsz * n;
MatSetValues(A, 3, (const int *) idxm, 1, (const int *) &idxn,
(const double *) v, ADD_VALUES);
}
size_t nfnodes = g->b->f->n->sz;
uint32_t *restrict nfptr = g->b->f->n->nptr;
struct xyz *restrict f_xyz = g->b->f->n->xyz;
/* Free boundary points */
#pragma omp parallel for
for(i = 0; i < nfnodes; i++)
{
uint32_t n = nfptr[i];
double xn = f_xyz->x0[i];
double yn = f_xyz->x1[i];
double zn = f_xyz->x2[i];
double ln = sqrt(xn * xn + yn * yn + zn * zn);
xn /= ln;
yn /= ln;
zn /= ln;
/* 9 FLOPS */
/*
Now let's get our other two vectors.
For the first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product with
{1,0,0} is close to unity, use {0,1,0} instead
*/
double dot = xn;
double X1, Y1, Z1;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = - dot * yn;
Z1 = - dot * zn;
}
else
{
dot = yn;
X1 = - dot * xn;
Y1 = 1.f - dot * yn;
Z1 = - dot * zn;
}
/* 6 FLOPS */
/* Normalize the first vector (V1) */
double size = sqrt(X1 * X1 + Y1 * Y1 + Z1 * Z1);
X1 /= size;
Y1 /= size;
Z1 /= size;
/* 9 FLOPS */
/* Take cross-product of normal with V1 to get V2 */
double X2 = yn * Z1 - zn * Y1;
double Y2 = zn * X1 - xn * Z1;
double Z2 = xn * Y1 - yn * X1;
/* 9 FLOPS */
/* Calculate elements of T and T(inverse)
evaluated at freestream */
double ubar0 = xn * iv->u;
ubar0 += yn * iv->v;
ubar0 += zn * iv->w;
double c20 = ubar0 * ubar0 + BETA;
double c0 = sqrt(c20);
double phi1 = xn * BETA;
phi1 += iv->u * ubar0;
double phi2 = yn * BETA;
phi2 += iv->v * ubar0;
double phi3 = zn * BETA;
phi3 += iv->w * ubar0;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
/* 9 * 3 + 8 FLOPS */
double t13 = c0 * BETA;
double t23 = iv->u * (ubar0 + c0);
t23 += xn * BETA;
double t33 = iv->v * (ubar0 + c0);
t33 += yn * BETA;
double t43 = iv->w * (ubar0 + c0);
t43 += zn * BETA;
double t14 = -c0 * BETA;
double t24 = iv->u * (ubar0 - c0);
t24 += xn * BETA;
double t34 = iv->v * (ubar0 - c0);
t34 += yn * BETA;
double t44 = iv->w * (ubar0 - c0);
t44 += zn * BETA;
double ti11 = iv->u * phi4;
ti11 += iv->v * phi5;
ti11 += iv->w * phi6;
ti11 = -ti11 / BETA / c20;
double ti21 = iv->u * phi7;
ti21 += iv->v * phi8;
ti21 += iv->w * phi9;
ti21 = -ti21 / BETA / c20;
double ti31 = (c0 - ubar0) / (2.f * BETA * c20);
double ti41 = -(c0 + ubar0) / (2.f * BETA * c20);
double ti12 = phi4 / c20;
double ti22 = phi7 / c20;
double ti32 = 0.5f * xn / c20;
double ti42 = 0.5f * xn / c20;
double ti13 = phi5 / c20;
double ti23 = phi8 / c20;
double ti33 = 0.5f * yn / c20;
double ti43 = 0.5f * yn / c20;
double ti14 = phi6 / c20;
double ti24 = phi9 / c20;
double ti34 = 0.5f * zn / c20;
double ti44 = 0.5f * zn / c20;
/* 27 + 16 + 9 + 6 + 6 + 6 FLOPS */
/* Now, get the variables on the "inside" */
double pi = q[bsz * n + 0];
double ui = q[bsz * n + 1];
double vi = q[bsz * n + 2];
double wi = q[bsz * n + 3];
double un = xn * ui;
un += yn * vi;
un += zn * wi;
/* 5 FLOPS */
/* If the normal velocity un is not positive (inflow), take the
reference condition from outside */
double pr, prp, ur, uru, vr, vrv, wr, wrw;
if(un > 0.f)
{
pr = pi;
prp = 1.f;
ur = ui;
uru = 1.f;
vr = vi;
vrv = 1.f;
wr = wi;
wrw = 1.f;
}
else
{
pr = iv->p;
prp = 0.f;
ur = iv->u;
uru = 0.f;
vr = iv->v;
vrv = 0.f;
wr = iv->w;
wrw = 0.f;
}
/* Set rhs */
double rhs1 = ti11 * pr;
rhs1 += ti12 * ur;
rhs1 += ti13 * vr;
rhs1 += ti14 * wr;
double rhs1p = ti11 * prp;
double rhs1u = ti12 * uru;
double rhs1v = ti13 * vrv;
double rhs1w = ti14 * wrw;
double rhs2 = ti21 * pr;
rhs2 += ti22 * ur;
rhs2 += ti23 * vr;
rhs2 += ti24 * wr;
double rhs2p = ti21 * prp;
double rhs2u = ti22 * uru;
double rhs2v = ti23 * vrv;
double rhs2w = ti24 * wrw;
double rhs3 = ti31 * pi;
rhs3 += ti32 * ui;
rhs3 += ti33 * vi;
rhs3 += ti34 * wi;
double rhs4 = ti41 * iv->p;
rhs4 += ti42 * iv->u;
rhs4 += ti43 * iv->v;
rhs4 += ti44 * iv->w;
/* 12 + 24 FLOPS */
/* Now do matrix multiplication to get values on boundary */
double pb = t13 * rhs3;
pb += t14 * rhs4;
double pbp = t13 * ti31;
double pbu = t13 * ti32;
double pbv = t13 * ti33;
double pbw = t13 * ti34;
double ub = X1 * rhs1;
ub += X2 * rhs2;
ub += t23 * rhs3;
ub += t24 * rhs4;
double ubp = X1 * rhs1p;
ubp += X2 * rhs2p;
ubp += t23 * ti31;
double ubu = X1 * rhs1u;
ubu += X2 * rhs2u;
ubu += t23 * ti32;
double ubv = X1 * rhs1v;
ubv += X2 * rhs2v;
ubv += t23 * ti33;
double ubw = X1 * rhs1w;
ubw += X2 * rhs2w;
ubw += t23 * ti34;
double vb = Y1 * rhs1;
vb += Y2 * rhs2;
vb += t33 * rhs3;
vb += t34 * rhs4;
double vbp = Y1 * rhs1p;
vbp += Y2 * rhs2p;
vbp += t33 * ti31;
double vbu = Y1 * rhs1u;
vbu += Y2 * rhs2u;
vbu += t33 * ti32;
double vbv = Y1 * rhs1v;
vbv += Y2 * rhs2v;
vbv += t33 * ti33;
double vbw = Y1 * rhs1w;
vbw += Y2 * rhs2w;
vbw += t33 * ti34;
double wb = Z1 * rhs1;
wb += Z2 * rhs2;
wb += t43 * rhs3;
wb += t44 * rhs4;
double wbp = Z1 * rhs1p;
wbp += Z2 * rhs2p;
wbp += t43 * ti31;
double wbu = Z1 * rhs1u;
wbu += Z2 * rhs2u;
wbu += t43 * ti32;
double wbv = Z1 * rhs1v;
wbv += Z2 * rhs2v;
wbv += t43 * ti33;
double wbw = Z1 * rhs1w;
wbw += Z2 * rhs2w;
wbw += t43 * ti34;
/* 5 * 15 + 6 + 5 + 2 FLOPS */
double unb = xn * ub;
unb += yn * vb;
unb += zn * wb;
double unbp = xn * ubp;
unbp += yn * vbp;
unbp += zn * wbp;
double unbu = xn * ubu;
unbu += yn * vbu;
unbu += zn * wbu;
double unbv = xn * ubv;
unbv += yn * vbv;
unbv += zn * wbv;
double unbw = xn * ubw;
unbw += yn * vbw;
unbw += zn * wbw;
/* 5 * 5 FLOPS */
/* Now add contribution to lhs */
double v[16];
v[0] = ln * BETA * unbp;
v[1] = ln * BETA * unbu;
v[2] = ln * BETA * unbv;
v[3] = ln * BETA * unbw;
v[4] = ln * (ub * unbp + unb * ubp + xn * pbp);
v[5] = ln * (ub * unbu + unb * ubu + xn * pbu);
v[6] = ln * (ub * unbv + unb * ubv + xn * pbv);
v[7] = ln * (ub * unbw + unb * ubw + xn * pbw);
v[8] = ln * (vb * unbp + unb * vbp + yn * pbp);
v[9] = ln * (vb * unbu + unb * vbu + yn * pbu);
v[10] = ln * (vb * unbv + unb * vbv + yn * pbv);
v[11] = ln * (vb * unbw + unb * vbw + yn * pbw);
v[12] = ln * (wb * unbp + unb * wbp + zn * pbp);
v[13] = ln * (wb * unbu + unb * wbu + zn * pbu);
v[14] = ln * (wb * unbv + unb * wbv + zn * pbv);
v[15] = ln * (wb * unbw + unb * wbw + zn * pbw);
/* 6 * 12 + 8 FLOPS */
MatSetValuesBlocked(A, 1, (const int *) &n, 1, (const int *) &n,
(const double *) v, ADD_VALUES);
}
ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
CHKERRQ(ierr);
ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
CHKERRQ(ierr);
compute_time(&ktime, &fill->t->fill);
#ifdef __USE_HW_COUNTER
const uint64_t cycle = __rdtsc() - icycle;
struct counters end;
perf_read(fd, &end);
struct tot tot;
perf_calc(start, end, &tot);
fill->perf_counters->ctrs->jacobian.cycles += cycle;
fill->perf_counters->ctrs->jacobian.tot.imcR += tot.imcR;
fill->perf_counters->ctrs->jacobian.tot.imcW += tot.imcW;
fill->perf_counters->ctrs->jacobian.tot.edcR += tot.edcR;
fill->perf_counters->ctrs->jacobian.tot.edcW += tot.edcW;
#endif
return 0;
}
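/*
   Illustrative sketch (not part of the original kernel): the tangent-basis
   construction used twice in fill_mat() above.  Given a unit face normal
   (xn, yn, zn), V1 is a trial axis with its normal component removed and then
   normalized, and V2 = n x V1 completes the orthonormal triad.  The helper
   name and signature are hypothetical.
*/
static void
tangent_basis(double xn, double yn, double zn,
              double *X1, double *Y1, double *Z1,
              double *X2, double *Y2, double *Z2)
{
    /* Start from {1,0,0} unless it is nearly parallel to the normal */
    double dot = xn;
    if(fabs(dot) < 0.95)
    {
        *X1 = 1.0 - dot * xn;
        *Y1 = - dot * yn;
        *Z1 = - dot * zn;
    }
    else
    {
        dot = yn;
        *X1 = - dot * xn;
        *Y1 = 1.0 - dot * yn;
        *Z1 = - dot * zn;
    }
    /* Normalize V1 */
    double size = sqrt(*X1 * *X1 + *Y1 * *Y1 + *Z1 * *Z1);
    *X1 /= size;
    *Y1 /= size;
    *Z1 /= size;
    /* V2 = n x V1 */
    *X2 = yn * *Z1 - zn * *Y1;
    *Y2 = zn * *X1 - xn * *Z1;
    *Z2 = xn * *Y1 - yn * *X1;
}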
|
mandel-omp-task-row.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
#pragma omp parallel
#pragma omp single
{
for (row = 0; row < height; ++row) {
#pragma omp task firstprivate(row) private(col)
for (col = 0; col < width; ++col) {
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
do {
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
/* Scale color and display point */
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
#pragma omp critical
{
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
}
}
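/* Illustrative sketch (not part of the original program): the escape-time
 * iteration for a single point c, identical to the inner do/while loop in
 * mandelbrot() above.  It returns the number of iterations taken before
 * |z|^2 reaches N*N (divergence) or maxiter is hit.  The function name is
 * hypothetical. */
static int escape_time(double c_real, double c_imag, int maxiter) {
    double z_real = 0, z_imag = 0, lengthsq, temp;
    int k = 0;
    do {
        temp = z_real*z_real - z_imag*z_imag + c_real;
        z_imag = 2*z_real*z_imag + c_imag;
        z_real = temp;
        lengthsq = z_real*z_real + z_imag*z_imag;
        ++k;
    } while (lengthsq < (N*N) && k < maxiter);
    return k;
}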
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
}
|
generated-funcs-regex.c | // RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-linux-gnu -fopenmp %s -emit-llvm -o - | FileCheck %s
void __test_offloading_42_abcdef_bar_l123(void);
void use(int);
void foo(int a)
{
#pragma omp target
use(a);
__test_offloading_42_abcdef_bar_l123();
int somevar_abc123_;
}
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN(16) int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN(16) simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN(16) simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN(16) int8_t i8[16];
SIMDE_ALIGN(16) int16_t i16[8];
SIMDE_ALIGN(16) int32_t i32[4];
SIMDE_ALIGN(16) int64_t i64[2];
SIMDE_ALIGN(16) uint8_t u8[16];
SIMDE_ALIGN(16) uint16_t u16[8];
SIMDE_ALIGN(16) uint32_t u32[4];
SIMDE_ALIGN(16) uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN(16) simde_int128 i128[1];
SIMDE_ALIGN(16) simde_uint128 u128[1];
#endif
SIMDE_ALIGN(16) simde_float32 f32[4];
SIMDE_ALIGN(16) int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN(16) uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN(16) simde__m64_private m64_private[2];
SIMDE_ALIGN(16) simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN(16) __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN(16) int8x16_t neon_i8;
SIMDE_ALIGN(16) int16x8_t neon_i16;
SIMDE_ALIGN(16) int32x4_t neon_i32;
SIMDE_ALIGN(16) int64x2_t neon_i64;
SIMDE_ALIGN(16) uint8x16_t neon_u8;
SIMDE_ALIGN(16) uint16x8_t neon_u16;
SIMDE_ALIGN(16) uint32x4_t neon_u32;
SIMDE_ALIGN(16) uint64x2_t neon_u64;
SIMDE_ALIGN(16) float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN(16) float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN(16) v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN(16) SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
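/* Illustrative usage sketch (not part of SIMDe): temporarily switch the
 * rounding mode with the portable wrappers above and restore it afterwards.
 * The function name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_x_example_round_down_scope (void) {
  unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
  SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_DOWN);
  /* ... code that relies on round-toward-negative-infinity here ... */
  SIMDE_MM_SET_ROUNDING_MODE(saved);
}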
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_round_ps (simde__m128 a, int rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndaq_f32(a_.neon_f32);
#elif defined(simde_math_roundf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps(a, rounding)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps(a, rounding)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN(16) simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
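/* Illustrative usage sketch (not part of SIMDe): round every lane of a vector
 * toward negative infinity using simde_mm_round_ps() defined above.  Lanes in
 * memory order are { -0.2, 2.7, -1.5, 1.5 } and become { -1, 2, -2, 1 }.  The
 * function name is hypothetical. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_example_floor_lanes (void) {
  simde__m128 v = simde_mm_set_ps(1.5f, -1.5f, 2.7f, -0.2f);
  return simde_mm_round_ps(v, SIMDE_MM_FROUND_TO_NEG_INF);
}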
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
(void) a;
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
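/* Illustrative sketch, not part of the original SIMDe source: the comment in the
 * NEON branch above describes the scalar-operation (_ss) convention -- only lane 0
 * is computed, lanes 1..3 are passed through from <a>. The hypothetical helper
 * below just demonstrates that convention; it is kept inside #if 0 so it has no
 * effect on the build. */
#if 0
static void
simde_x_example_add_ss_semantics (void) {
  /* simde_mm_set_ps takes its arguments high-to-low, so lane 0 of a is 1.0f. */
  simde__m128 a = simde_mm_set_ps(SIMDE_FLOAT32_C(4.0), SIMDE_FLOAT32_C(3.0), SIMDE_FLOAT32_C(2.0), SIMDE_FLOAT32_C(1.0));
  simde__m128 b = simde_mm_set_ps(SIMDE_FLOAT32_C(40.0), SIMDE_FLOAT32_C(30.0), SIMDE_FLOAT32_C(20.0), SIMDE_FLOAT32_C(10.0));
  simde__m128_private r_ = simde__m128_to_private(simde_mm_add_ss(a, b));
  /* r_.f32 is { 11.0f, 2.0f, 3.0f, 4.0f }: lane 0 holds a[0] + b[0],
   * lanes 1..3 are the remnants of a. */
  (void) r_;
}
#endif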
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
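/* Illustrative sketch, not part of the original SIMDe source: the note above
 * relies on the fact that a floating-point compare of a NaN lane with itself is
 * false, while comparing the same bit pattern as integers is always true. The
 * hypothetical snippet below shows that distinction in plain scalar C; it is
 * kept inside #if 0 so it has no effect on the build. */
#if 0
static void
simde_x_example_nan_self_compare (void) {
  volatile simde_float32 zero = SIMDE_FLOAT32_C(0.0);
  simde_float32 nan_value = zero / zero;    /* NaN under IEEE 754 */
  int32_t bits;
  simde_memcpy(&bits, &nan_value, sizeof(bits));
  int float_eq = (nan_value == nan_value);  /* 0: NaN never compares equal to itself */
  int int_eq   = (bits == bits);            /* 1: identical bits always compare equal */
  (void) float_eq;
  (void) int_eq;
}
#endif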
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend lanes from two vectors
* together according to a mask. It is similar to _mm_blendv_ps, except
* that it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just on bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that every lane in mask is either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
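/* Illustrative sketch, not part of the original SIMDe source: a hypothetical use
 * of simde_x_mm_select_ps where the promise above (every mask lane is 0 or ~0)
 * is satisfied by a comparison result. simde_mm_cmplt_ps is defined later in
 * this file; the sketch is kept inside #if 0 so it has no effect on the build. */
#if 0
static simde__m128
simde_x_example_clamp_negatives_to_zero (simde__m128 v) {
  simde__m128 zero = simde_mm_set1_ps(SIMDE_FLOAT32_C(0.0));
  /* Comparison results are all-zeros or all-ones per lane, which is exactly
   * the form simde_x_mm_select_ps expects. */
  simde__m128 is_negative = simde_mm_cmplt_ps(v, zero);
  /* Take zero where the lane was negative, v otherwise. */
  return simde_x_mm_select_ps(v, zero, is_negative);
}
#endif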
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0))
return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON has no ordered-compare builtin. Compare a == a and
b == b to detect NaN lanes (a NaN never compares equal to itself),
then AND the two results to get the final ordered mask. */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
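/* Illustrative sketch, not part of the original SIMDe source: the NEON note above
 * uses the identity "x == x is false only for NaN". The same idea lets a caller
 * replace unordered (NaN) lanes with a fallback value by combining
 * simde_mm_cmpord_ps with simde_x_mm_select_ps (defined earlier in this file).
 * Hypothetical helper, kept inside #if 0 so it has no effect on the build. */
#if 0
static simde__m128
simde_x_example_replace_nan (simde__m128 v, simde__m128 fallback) {
  /* ordered lanes -> ~0, NaN lanes -> 0 */
  simde__m128 ordered = simde_mm_cmpord_ps(v, v);
  /* keep v where ordered, take fallback where NaN */
  return simde_x_mm_select_ps(fallback, v, ordered);
}
#endif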
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && !defined(__clang__) && 0
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 0 /* TODO */
r_.neon_f32 = vmovl_s16(vget_low_s16(vuzp1q_s16(a_.neon_i16, vmovq_n_s16(0))));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
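/* Illustrative sketch, not part of the original SIMDe source: the clamp/round/
 * narrow sequence described above means out-of-range lanes saturate to INT8_MAX
 * or INT8_MIN, and only the low four bytes of the result are meaningful.
 * Hypothetical example, kept inside #if 0 so it has no effect on the build. */
#if 0
static void
simde_x_example_cvtps_pi8 (void) {
  /* Arguments are high-to-low, so lane 0 is -1.25f. */
  simde__m128 v = simde_mm_set_ps(SIMDE_FLOAT32_C(300.0), SIMDE_FLOAT32_C(-300.0),
                                  SIMDE_FLOAT32_C(1.75), SIMDE_FLOAT32_C(-1.25));
  simde__m64_private r_ = simde__m64_to_private(simde_mm_cvtps_pi8(v));
  /* r_.i8[0..3] == { -1, 2, -128, 127 }; r_.i8[4..7] are undefined. */
  (void) r_;
}
#endif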
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
r_ = *SIMDE_ALIGN_CAST(simde__m128_private const*, mem_addr);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps1 (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load_ps1(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument,
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
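/* Illustrative usage sketch (not part of the API): to load two floats
 * through simde_mm_loadl_pi without tripping -Wcast-align, copy the
 * values into a properly typed simde__m64 first:
 *
 *   simde_float32 lo[2] = { 1.0f, 2.0f };
 *   simde__m64 tmp;
 *   simde_memcpy(&tmp, lo, sizeof(tmp));
 *   v = simde_mm_loadl_pi(v, &tmp);
 */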
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && 0
/* TODO: XLC documentation has it, but it doesn't seem to work.
* More investigation is necessary. */
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#else
r_.f32[0] = mem_addr[0];
r_.f32[1] = mem_addr[1];
r_.f32[2] = mem_addr[2];
r_.f32[3] = mem_addr[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
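/* Isolate each lane's sign bit, shift lane i's bit down into bit
 * position i (negative counts in vshl_u8 shift right), then add the
 * lanes horizontally; since each lane contributes a distinct bit the
 * sum is equivalent to OR-ing the bits into one 8-bit mask. */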
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
r = vaddv_u8(mask_result);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
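/* Shift each lane's sign bit down to bit 0, move lane i's bit into
 * position i, then sum the lanes to form the 4-bit mask. */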
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
# define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
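/* The magic constant approximates the reciprocal by subtracting the
 * float's bit pattern from a tuned constant (roughly negating the
 * exponent); the multiply below is one Newton-Raphson refinement,
 * r' = r * (2 - r*x). */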
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
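/* Quake-style fast inverse square root: (ix >> 1) halves the biased
 * exponent, the magic constant corrects the bias, and each
 * Newton-Raphson step x = x*(1.5 - 0.5*a*x*x) roughly doubles the
 * number of accurate bits (1.5008909 is a tuned variant of 1.5). */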
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde__m128_to_private(simde_mm_rsqrt_ps(a)).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
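/* Sum of absolute differences: vabd_u8 computes per-lane |a - b|,
 * vpaddl_u8 widens and adds adjacent pairs to u16, and the four
 * partial sums are then reduced into lane 0 of the result. */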
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
#  if __has_builtin(__builtin_ia32_undef128)
#   define SIMDE_HAVE_UNDEFINED128
#  endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
#  define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
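/* sfence only has to order stores; lacking a native intrinsic we
 * conservatively fall back to a full sequentially-consistent fence,
 * which is stronger than required but always correct. */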
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
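/* For example (illustrative): SIMDE_MM_SHUFFLE(3, 2, 1, 0) == 0xE4 is
 * the identity permutation, while SIMDE_MM_SHUFFLE(0, 1, 2, 3) == 0x1B
 * reverses the four lanes. */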
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(simde_math_sqrt)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P5_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps1 (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[0];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store_ps1(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store1_ps(mem_addr, a);
#else
simde_mm_store_ps1(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->f32[0] = vgetq_lane_f32(a_.neon_f32, 2);
dest_->f32[1] = vgetq_lane_f32(a_.neon_f32, 3);
#else
dest_->f32[0] = a_.f32[2];
dest_->f32[1] = a_.f32[3];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 3);
mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 2);
mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 1);
mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
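/* The ucomi* comparisons must not raise an exception on quiet NaNs,
 * so save the floating-point environment, compare, and restore it to
 * discard any INVALID flag the comparison may have set. (The same
 * pattern is used for the remaining ucomi* functions below.) */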
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(SIMDE_ASSUME_ALIGNED(16, mem_addr), a_.neon_f32);
#else
simde_memcpy(SIMDE_ASSUME_ALIGNED(16, mem_addr), &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
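/* 4x4 transpose via 2x2 transposes: vtrnq_f32 interleaves the rows in
 * 2x2 blocks, then recombining the low/high halves finishes the job.
 * The portable fallback below mirrors Intel's documented
 * unpack/movelh/movehl implementation of _MM_TRANSPOSE4_PS. */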
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color multiplied by Source alpha
Dca = Dc*Da normalized Dest color multiplied by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' appears in the formulas below as 'gamma', the resulting alpha value.
Most functions use a blending mode of 'Over' (X=1,Y=1,Z=1); this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also define that Mathematical Composition
methods should use a 'Over' blending mode for Alpha Channel.
However, it was not applied for the composition modes 'Plus', 'Minus',
or the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (it is enabled by default), mathematical compositions are
only performed on the channels specified, and are applied
independently of each other. In other words, the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
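/*
  Worked example ('Over', X=Y=Z=1, f(Sc,Dc)=Sc):
    Dca' = Sc*Sa*Da + Sca*(1-Da) + Dca*(1-Sa)
         = Sca*Da + Sca - Sca*Da + Dca*(1-Sa)
         = Sca + Dca*(1-Sa)
    Da'  = Sa*Da + Sa*(1-Da) + Da*(1-Sa)
         = Sa + Da - Sa*Da
  which is exactly the 'gamma' optimization noted above.
*/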
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
const MagickRealType luma,MagickRealType *red,MagickRealType *green,
MagickRealType *blue)
{
MagickRealType
b,
c,
g,
h,
m,
r,
x;
/*
Convert HCL to RGB colorspace.
*/
assert(red != (MagickRealType *) NULL);
assert(green != (MagickRealType *) NULL);
assert(blue != (MagickRealType *) NULL);
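/*
  h scales the hue into [0,6) to select one of six hue sectors; c is
  the chroma and x the intermediate (second-largest) color component
  within the sector; m shifts the result to match the requested luma.
*/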
h=6.0*hue;
c=chroma;
x=c*(1.0-fabs(fmod(h,2.0)-1.0));
r=0.0;
g=0.0;
b=0.0;
if ((0.0 <= h) && (h < 1.0))
{
r=c;
g=x;
}
else
if ((1.0 <= h) && (h < 2.0))
{
r=x;
g=c;
}
else
if ((2.0 <= h) && (h < 3.0))
{
g=c;
b=x;
}
else
if ((3.0 <= h) && (h < 4.0))
{
g=x;
b=c;
}
else
if ((4.0 <= h) && (h < 5.0))
{
r=x;
b=c;
}
else
if ((5.0 <= h) && (h < 6.0))
{
r=c;
b=x;
}
m=luma-(0.298839*r+0.586811*g+0.114350*b);
*red=QuantumRange*(r+m);
*green=QuantumRange*(g+m);
*blue=QuantumRange*(b+m);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
MagickRealType *luma)
{
MagickRealType
b,
c,
g,
h,
max,
r;
/*
Convert RGB to HCL colorspace.
*/
assert(hue != (MagickRealType *) NULL);
assert(chroma != (MagickRealType *) NULL);
assert(luma != (MagickRealType *) NULL);
r=red;
g=green;
b=blue;
max=MagickMax(r,MagickMax(g,b));
c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
h=0.0;
if (c == 0)
h=0.0;
else
if (red == max)
h=fmod((g-b)/c+6.0,6.0);
else
if (green == max)
h=((b-r)/c)+2.0;
else
if (blue == max)
h=((r-g)/c)+4.0;
*hue=(h/6.0);
*chroma=QuantumScale*c;
*luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
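/*
  Usage sketch (illustrative only): CompositeHCL() decomposes an RGB triplet
  into hue/chroma/luma and HCLComposite() reassembles it, so

    CompositeHCL(r,g,b,&hue,&chroma,&luma);
    HCLComposite(hue,chroma,luma,&r,&g,&b);

  should approximately reproduce the original channels.  The Colorize, Hue,
  Luminize, Saturate and Modulate compose operators below mix hue/chroma/luma
  taken from the source and canvas pixels before converting back to RGB.
*/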
static MagickBooleanType CompositeOverImage(Image *image,
const Image *source_image,const MagickBooleanType clip_to_self,
const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*image_view,
*source_view;
const char
*value;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Composite image.
*/
status=MagickTrue;
progress=0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
Sa,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
alpha=Sa+Da-Sa*Da;
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((source_traits == UndefinedPixelTrait) &&
(channel != AlphaPixelChannel))
continue;
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
pixel=QuantumRange*alpha;
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Sc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
gamma=PerceptibleReciprocal(alpha);
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if (traits == UndefinedPixelTrait)
continue;
if (source_traits != UndefinedPixelTrait)
SetPixelChannel(image,channel,p[i],q);
else if (channel == AlphaPixelChannel)
SetPixelChannel(image,channel,OpaqueAlpha,q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,
(MagickOffsetType) y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,
(MagickOffsetType) y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling.
Blur Image dictated by an overlay gradient map: X = red_channel;
Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
*/
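/*
  Illustrative only (example values are made up): the artifact is a standard
  geometry string, e.g.
    (void) SetImageArtifact(image,"compose:args","3x5+45");
  requests an EWA blur ellipse with x_scale=3, y_scale=5 and a fixed
  rotation angle of 45 degrees.
*/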
canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
The user's input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5, so to make this match the
user's input the ellipse size needs to be doubled.
*/
width=height=geometry_info.rho*2.0;
if ((flags & HeightValue) != 0 )
height=geometry_info.sigma*2.0;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
/* rotate vectors if a rotation angle is given */
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
/* Otherwise let's set an angle range and calculate it in the loop */
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a Gaussian cylindrical filter for EWA blurring.
As the minimum ellipse radius is support*1.0, the EWA algorithm
can only produce a minimum blur of 0.5 for a Gaussian (support=2.0).
This means that even 'No Blur' will still be a little blurry!
The solution (which also sidesteps the problem of any user
expert filter settings) is to set our own filter settings, then
restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/* do the variable blurring of each pixel in image */
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs((double) angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
#if 0
if ( x == 10 && y == 60 ) {
(void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1,
blur.x2,blur.y1, blur.y2);
(void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale*
GetPixelRed(p),QuantumScale*GetPixelGreen(p));
}
#endif
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
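/*
  Illustrative only (example values are made up): for instance the artifact
    (void) SetImageArtifact(image,"compose:args","20x10");
  displaces pixels by up to +/-20 columns and +/-10 rows, driven by the
  red (X) and green (Y) channels of the overlay gradient map.
*/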
canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided
displacement/distortion map -- like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
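/*
  Illustrative only (example numbers): "compose:args" of "40" dissolves the
  source to 40% over a fully visible canvas; "40x60" also dissolves the
  canvas to 60%; values above 100 (e.g. "150") keep the source opaque and
  dissolve the canvas to 2.0-1.5 = 50%.
*/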
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from the "compose:args" setting.
Unused values are set to zero automagically.
Arguments are normally a comma-separated list, so this probably should
be changed to some 'general comma list' parser (with a minimum
number of values).
*/
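/*
  Illustrative only (example values, assuming the usual geometry parsing of
  comma-separated values): with the formula f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
  used in the MathematicsCompositeOp case below, an artifact such as
    (void) SetImageArtifact(image,"compose:args","0,1,1,0");
  maps to A=rho=0, B=sigma=1, C=xi=1, D=psi=0, i.e. f(Sc,Dc) = Sc + Dc,
  a simple additive blend.
*/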
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case LightenCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case ModulusAddCompositeOp:
case ModulusSubtractCompositeOp:
case MultiplyCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ScreenCompositeOp:
case SoftLightCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
default:
{
alpha=1.0;
break;
}
}
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*Sa*Da;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) (QuantumRange-
GetPixelBlack(source_image,p));
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case LinearBurnCompositeOp:
{
/*
LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
'Mathematics' a free form user control mathematical composition
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
as a comma separated 'geometry' string in "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
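/*
  Derivation check (added for clarity): multiplying f(Sc,Dc) by Sa*Da
  term-by-term gives A*Sc*Dc*Sa*Da = A*Sca*Dca, B*Sc*Sa*Da = B*Sca*Da,
  C*Dc*Sa*Da = C*Dca*Sa and D*Sa*Da, which is exactly the expression
  evaluated below before the un-premultiply by 'gamma'.
*/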
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Dc - Sc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
pixel=Sc+Dc;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
break;
}
case ModulusSubtractCompositeOp:
{
pixel=Sc-Dc;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sca);
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image * ) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: the texture image to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"
CacheView
*image_view,
*texture_view;
Image
*texture_image;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (texture == (const Image *) NULL)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
texture_image=CloneImage(texture,0,0,MagickTrue,exception);
if (texture_image == (const Image *) NULL)
return(MagickFalse);
(void) TransformImageColorspace(texture_image,image->colorspace,exception);
(void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
exception);
status=MagickTrue;
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) ||
(image->alpha_trait != UndefinedPixelTrait) ||
(texture_image->alpha_trait != UndefinedPixelTrait)))
{
/*
Tile texture onto the image background.
*/
for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
{
register ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
MagickBooleanType
thread_status;
thread_status=CompositeImage(image,texture_image,image->compose,
MagickTrue,x+texture_image->tile_offset.x,y+
texture_image->tile_offset.y,exception);
if (thread_status == MagickFalse)
{
status=thread_status;
break;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
image->rows,image->rows);
texture_image=DestroyImage(texture_image);
return(status);
}
/*
Tile texture onto the image background (optimized).
*/
status=MagickTrue;
texture_view=AcquireVirtualCacheView(texture_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(texture_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p,
*pixels;
register ssize_t
x;
register Quantum
*q;
size_t
width;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
(y+texture_image->tile_offset.y) % texture_image->rows,
texture_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
register ssize_t
j;
p=pixels;
width=texture_image->columns;
if ((x+(ssize_t) width) > (ssize_t) image->columns)
width=image->columns-x;
for (j=0; j < (ssize_t) width; j++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
p+=GetPixelChannels(texture_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(texture_image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(texture_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(texture_image);
q+=GetPixelChannels(image);
}
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
texture_view=DestroyCacheView(texture_view);
image_view=DestroyCacheView(image_view);
texture_image=DestroyImage(texture_image);
return(status);
}
|
common.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_UTILS_COMMON_H_
#define LIGHTGBM_UTILS_COMMON_H_
#if ((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__)))
#include <LightGBM/utils/common_legacy_solaris.h>
#endif
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <limits>
#include <string>
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <functional>
#include <iomanip>
#include <iterator>
#include <map>
#include <memory>
#include <sstream>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
#define FMT_HEADER_ONLY
#include "../../../external_libs/fmt/include/fmt/format.h"
#endif
#include "../../../external_libs/fast_double_parser/include/fast_double_parser.h"
#ifdef _MSC_VER
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#endif
#if defined(_MSC_VER)
#include <malloc.h>
#elif MM_MALLOC
#include <mm_malloc.h>
// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
// https://www.oreilly.com/library/view/mac-os-x/0596003560/ch05s01s02.html
#elif defined(__GNUC__) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#define _mm_malloc(a, b) memalign(b, a)
#define _mm_free(a) free(a)
#else
#include <stdlib.h>
#define _mm_malloc(a, b) malloc(a)
#define _mm_free(a) free(a)
#endif
namespace LightGBM {
namespace Common {
/*!
* Imbues the stream with the C locale.
*/
static void C_stringstream(std::stringstream &ss) {
ss.imbue(std::locale::classic());
}
inline static char tolower(char in) {
if (in <= 'Z' && in >= 'A')
return in - ('Z' - 'z');
return in;
}
inline static std::string Trim(std::string str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
return str;
}
inline static std::string RemoveQuotationSymbol(std::string str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of("'\"") + 1);
str.erase(0, str.find_first_not_of("'\""));
return str;
}
inline static bool StartsWith(const std::string& str, const std::string prefix) {
if (str.substr(0, prefix.size()) == prefix) {
return true;
} else {
return false;
}
}
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == delimiter) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> SplitBrackets(const char* c_str, char left_delimiter, char right_delimiter) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
bool open = false;
while (pos < str.length()) {
if (str[pos] == left_delimiter) {
open = true;
++pos;
i = pos;
} else if (str[pos] == right_delimiter && open) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
open = false;
++pos;
} else {
++pos;
}
}
return ret;
}
inline static std::vector<std::string> SplitLines(const char* c_str) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == '\n' || str[pos] == '\r') {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
// skip the line endings
while (str[pos] == '\n' || str[pos] == '\r') ++pos;
// new begin
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
bool met_delimiters = false;
for (int j = 0; delimiters[j] != '\0'; ++j) {
if (str[pos] == delimiters[j]) {
met_delimiters = true;
break;
}
}
if (met_delimiters) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
int sign;
T value;
while (*p == ' ') {
++p;
}
sign = 1;
if (*p == '-') {
sign = -1;
++p;
} else if (*p == '+') {
++p;
}
for (value = 0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10 + (*p - '0');
}
*out = static_cast<T>(sign * value);
while (*p == ' ') {
++p;
}
return p;
}
template<typename T>
inline static double Pow(T base, int power) {
if (power < 0) {
return 1.0 / Pow(base, -power);
} else if (power == 0) {
return 1;
} else if (power % 2 == 0) {
return Pow(base*base, power / 2);
} else if (power % 3 == 0) {
return Pow(base*base*base, power / 3);
} else {
return base * Pow(base, power - 1);
}
}
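// Usage sketch (illustrative): Pow(2.0, 10) == 1024.0 and Pow(10.0, -3) ~= 0.001.
// Exponentiation by repeated squaring/cubing keeps the recursion depth roughly
// logarithmic in |power|.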
inline static const char* Atof(const char* p, double* out) {
int frac;
double sign, value, scale;
*out = NAN;
// Skip leading white space, if any.
while (*p == ' ') {
++p;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
++p;
} else if (*p == '+') {
++p;
}
// is a number
if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
// Get digits before decimal point or exponent, if any.
for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double right = 0.0;
int nn = 0;
++p;
while (*p >= '0' && *p <= '9') {
right = (*p - '0') + right * 10.0;
++nn;
++p;
}
value += right / Pow(10.0, nn);
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
uint32_t expon;
// Get sign of exponent, if any.
++p;
if (*p == '-') {
frac = 1;
++p;
} else if (*p == '+') {
++p;
}
// Get digits of exponent, if any.
for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
*out = sign * (frac ? (value / scale) : (value * scale));
} else {
size_t cnt = 0;
while (*(p + cnt) != '\0' && *(p + cnt) != ' '
&& *(p + cnt) != '\t' && *(p + cnt) != ','
&& *(p + cnt) != '\n' && *(p + cnt) != '\r'
&& *(p + cnt) != ':') {
++cnt;
}
if (cnt > 0) {
std::string tmp_str(p, cnt);
std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
tmp_str == std::string("null")) {
*out = NAN;
} else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
*out = sign * 1e308;
} else {
Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
}
p += cnt;
}
}
while (*p == ' ') {
++p;
}
return p;
}
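/*!
* \brief Illustrative usage sketch for Atof; the buffer contents and variable
* names below are assumptions, not taken from the original code.
* \code
* double v1, v2, v3;
* const char* p = "1.5e-3 nan inf";
* p = Atof(p, &v1); // v1 == 0.0015, p now points at "nan inf"
* p = Atof(p, &v2); // v2 is NAN ("na", "nan" and "null" all map to NAN)
* p = Atof(p, &v3); // v3 == 1e308 ("inf" and "infinity" map to +/-1e308)
* \endcode
*/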
inline static bool AtoiAndCheck(const char* p, int* out) {
const char* after = Atoi(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static bool AtofAndCheck(const char* p, double* out) {
const char* after = Atof(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static const char* SkipSpaceAndTab(const char* p) {
while (*p == ' ' || *p == '\t') {
++p;
}
return p;
}
inline static const char* SkipReturn(const char* p) {
while (*p == '\n' || *p == '\r' || *p == ' ') {
++p;
}
return p;
}
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
std::vector<T2> ret(arr.size());
for (size_t i = 0; i < arr.size(); ++i) {
ret[i] = static_cast<T2>(arr[i]);
}
return ret;
}
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
T ret = 0;
Atoi(str.c_str(), &ret);
return ret;
}
};
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
return static_cast<T>(std::stod(str));
}
};
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T>
inline static std::vector<std::vector<T>> StringToArrayofArrays(
const std::string& str, char left_bracket, char right_bracket, char delimiter) {
std::vector<std::string> strs = SplitBrackets(str.c_str(), left_bracket, right_bracket);
std::vector<std::vector<T>> ret;
for (const auto& s : strs) {
ret.push_back(StringToArray<T>(s, delimiter));
}
return ret;
}
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = Split(str.c_str(), ' ');
CHECK_EQ(strs.size(), static_cast<size_t>(n));
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T, bool is_float>
struct __StringToTHelperFast {
const char* operator()(const char*p, T* out) const {
return Atoi(p, out);
}
};
template<typename T>
struct __StringToTHelperFast<T, true> {
const char* operator()(const char*p, T* out) const {
double tmp = 0.0f;
auto ret = Atof(p, &tmp);
*out = static_cast<T>(tmp);
return ret;
}
};
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
auto p_str = str.c_str();
__StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
std::vector<T> ret(n);
for (int i = 0; i < n; ++i) {
p_str = helper(p_str, &ret[i]);
}
return ret;
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter, const bool force_C_locale = false) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
if (force_C_locale) {
C_stringstream(str_buf);
}
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[0];
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
template<>
inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter, const bool force_C_locale) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
if (force_C_locale) {
C_stringstream(str_buf);
}
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << static_cast<int16_t>(strs[0]);
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << static_cast<int16_t>(strs[i]);
}
return str_buf.str();
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter, const bool force_C_locale = false) {
if (end <= start) {
return std::string("");
}
start = std::min(start, static_cast<size_t>(strs.size()) - 1);
end = std::min(end, static_cast<size_t>(strs.size()));
std::stringstream str_buf;
if (force_C_locale) {
C_stringstream(str_buf);
}
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[start];
for (size_t i = start + 1; i < end; ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
inline static int64_t Pow2RoundUp(int64_t x) {
int64_t t = 1;
for (int i = 0; i < 64; ++i) {
if (t >= x) {
return t;
}
t <<= 1;
}
return 0;
}
/*!
* \brief Do inplace softmax transformation on p_rec
* \param p_rec The input/output vector of the values.
*/
inline static void Softmax(std::vector<double>* p_rec) {
std::vector<double> &rec = *p_rec;
double wmax = rec[0];
for (size_t i = 1; i < rec.size(); ++i) {
wmax = std::max(rec[i], wmax);
}
double wsum = 0.0f;
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] = std::exp(rec[i] - wmax);
wsum += rec[i];
}
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] /= static_cast<double>(wsum);
}
}
inline static void Softmax(const double* input, double* output, int len) {
double wmax = input[0];
for (int i = 1; i < len; ++i) {
wmax = std::max(input[i], wmax);
}
double wsum = 0.0f;
for (int i = 0; i < len; ++i) {
output[i] = std::exp(input[i] - wmax);
wsum += output[i];
}
for (int i = 0; i < len; ++i) {
output[i] /= static_cast<double>(wsum);
}
}
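/*!
* \brief Illustrative usage sketch for the two Softmax overloads above;
* the sample values are assumptions.
* \code
* std::vector<double> scores = {1.0, 2.0, 3.0};
* Softmax(&scores); // in-place: entries are now positive and sum to 1
* double raw[3] = {1.0, 2.0, 3.0};
* double probs[3];
* Softmax(raw, probs, 3); // out-of-place overload, same result in probs
* \endcode
*/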
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
std::vector<const T*> ret;
for (auto t = input.begin(); t !=input.end(); ++t) {
ret.push_back(t->get());
}
return ret;
}
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
std::vector<std::pair<T1, T2>> arr;
auto& ref_key = *keys;
auto& ref_value = *values;
for (size_t i = start; i < keys->size(); ++i) {
arr.emplace_back(ref_key[i], ref_value[i]);
}
if (!is_reverse) {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first < b.first;
});
} else {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first > b.first;
});
}
// arr stores the pairs beginning at index `start`, so shift indices back when writing.
for (size_t i = start; i < keys->size(); ++i) {
ref_key[i] = arr[i - start].first;
ref_value[i] = arr[i - start].second;
}
}
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
std::vector<T*> ptr(data->size());
auto& ref_data = *data;
for (size_t i = 0; i < data->size(); ++i) {
ptr[i] = ref_data[i].data();
}
return ptr;
}
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
std::vector<int> ret(data.size());
for (size_t i = 0; i < data.size(); ++i) {
ret[i] = static_cast<int>(data[i].size());
}
return ret;
}
inline static double AvoidInf(double x) {
if (std::isnan(x)) {
return 0.0;
} else if (x >= 1e300) {
return 1e300;
} else if (x <= -1e300) {
return -1e300;
} else {
return x;
}
}
inline static float AvoidInf(float x) {
if (std::isnan(x)) {
return 0.0f;
} else if (x >= 1e38) {
return 1e38f;
} else if (x <= -1e38) {
return -1e38f;
} else {
return x;
}
}
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
return (0);
}
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
size_t len = _Last - _First;
const size_t kMinInnerLen = 1024;
int num_threads = OMP_NUM_THREADS();
if (len <= kMinInnerLen || num_threads <= 1) {
std::sort(_First, _Last, _Pred);
return;
}
size_t inner_size = (len + num_threads - 1) / num_threads;
inner_size = std::max(inner_size, kMinInnerLen);
num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < num_threads; ++i) {
size_t left = inner_size*i;
size_t right = left + inner_size;
right = std::min(right, len);
if (right > left) {
std::sort(_First + left, _First + right, _Pred);
}
}
// Buffer for merge.
std::vector<_VTRanIt> temp_buf(len);
_RanIt buf = temp_buf.begin();
size_t s = inner_size;
// Recursive merge
while (s < len) {
int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < loop_size; ++i) {
size_t left = i * 2 * s;
size_t mid = left + s;
size_t right = mid + s;
right = std::min(len, right);
if (mid >= right) { continue; }
std::copy(_First + left, _First + mid, buf + left);
std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
}
s *= 2;
}
}
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
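/*!
* \brief Illustrative usage sketch for ParallelSort; the data and comparator
* are assumptions. Small ranges fall back to std::sort; larger ranges are
* sorted in per-thread chunks and then merged pairwise.
* \code
* std::vector<double> v = {3.0, 1.0, 2.0};
* ParallelSort(v.begin(), v.end(),
* [](double a, double b) { return a < b; });
* \endcode
*/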
// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
std::ostringstream os;
os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
Log::Fatal(os.str().c_str(), callername, i);
};
for (int i = 1; i < ny; i += 2) {
if (y[i - 1] < y[i]) {
if (y[i - 1] < ymin) {
fatal_msg(i - 1);
} else if (y[i] > ymax) {
fatal_msg(i);
}
} else {
if (y[i - 1] > ymax) {
fatal_msg(i - 1);
} else if (y[i] < ymin) {
fatal_msg(i);
}
}
}
if (ny & 1) { // odd
if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
fatal_msg(ny - 1);
}
}
}
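/*!
* \brief Illustrative usage sketch for CheckElementsIntervalClosed; the label
* values and caller name are assumptions. Elements are scanned in pairs so
* each pair costs only one comparison per bound.
* \code
* std::vector<float> labels = {0.0f, 1.0f, 1.0f, 0.0f};
* CheckElementsIntervalClosed(labels.data(), 0.0f, 1.0f,
* static_cast<int>(labels.size()), "BinaryObjective");
* \endcode
*/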
// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
T1 minw;
T1 maxw;
T1 sumw;
int i;
if (nw & 1) { // odd
minw = w[0];
maxw = w[0];
sumw = w[0];
i = 2;
} else { // even
if (w[0] < w[1]) {
minw = w[0];
maxw = w[1];
} else {
minw = w[1];
maxw = w[0];
}
sumw = w[0] + w[1];
i = 3;
}
for (; i < nw; i += 2) {
if (w[i - 1] < w[i]) {
minw = std::min(minw, w[i - 1]);
maxw = std::max(maxw, w[i]);
} else {
minw = std::min(minw, w[i]);
maxw = std::max(maxw, w[i - 1]);
}
sumw += w[i - 1] + w[i];
}
if (mi != nullptr) {
*mi = minw;
}
if (ma != nullptr) {
*ma = maxw;
}
if (su != nullptr) {
*su = static_cast<T2>(sumw);
}
}
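/*!
* \brief Illustrative usage sketch for ObtainMinMaxSum; the weight values are
* assumptions. Any output pointer may be nullptr if that statistic is not needed.
* \code
* std::vector<float> weights = {0.5f, 2.0f, 1.0f};
* float wmin = 0.0f, wmax = 0.0f;
* double wsum = 0.0;
* ObtainMinMaxSum(weights.data(), static_cast<int>(weights.size()),
* &wmin, &wmax, &wsum); // wmin == 0.5, wmax == 2.0, wsum == 3.5
* \endcode
*/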
inline static std::vector<uint32_t> EmptyBitset(int n) {
int size = n / 32;
if (n % 32 != 0) ++size;
return std::vector<uint32_t>(size);
}
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
auto& ref_v = *vec;
int i1 = val / 32;
int i2 = val % 32;
if (static_cast<int>(vec->size()) < i1 + 1) {
vec->resize(i1 + 1, 0);
}
ref_v[i1] |= (1U << i2);  // unsigned shift avoids shifting into the sign bit when i2 == 31
}
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
std::vector<uint32_t> ret;
for (int i = 0; i < n; ++i) {
int i1 = vals[i] / 32;
int i2 = vals[i] % 32;
if (static_cast<int>(ret.size()) < i1 + 1) {
ret.resize(i1 + 1, 0);
}
ret[i1] |= (1U << i2);
}
return ret;
}
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
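/*!
* \brief Illustrative usage sketch for the bitset helpers above; the category
* indices are assumptions.
* \code
* int cats[] = {1, 34, 67};
* std::vector<uint32_t> bits = ConstructBitset(cats, 3); // 3 x 32-bit words
* bool has34 = FindInBitset(bits.data(), static_cast<int>(bits.size()), 34); // true
* bool has2 = FindInBitset(bits.data(), static_cast<int>(bits.size()), 2); // false
* InsertBitset(&bits, 2); // bit 2 is now set
* \endcode
*/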
inline static bool CheckDoubleEqualOrdered(double a, double b) {
double upper = std::nextafter(a, INFINITY);
return b <= upper;
}
inline static double GetDoubleUpperBound(double a) {
return std::nextafter(a, INFINITY);
}
inline static size_t GetLine(const char* str) {
auto start = str;
while (*str != '\0' && *str != '\n' && *str != '\r') {
++str;
}
return str - start;
}
inline static const char* SkipNewLine(const char* str) {
if (*str == '\r') {
++str;
}
if (*str == '\n') {
++str;
}
return str;
}
template <typename T>
static int Sign(T x) {
return (x > T(0)) - (x < T(0));
}
template <typename T>
static T SafeLog(T x) {
if (x > 0) {
return std::log(x);
} else {
return -INFINITY;
}
}
inline bool CheckAllowedJSON(const std::string& s) {
unsigned char char_code;
for (auto c : s) {
char_code = static_cast<unsigned char>(c);
if (char_code == 34 // "
|| char_code == 44 // ,
|| char_code == 58 // :
|| char_code == 91 // [
|| char_code == 93 // ]
|| char_code == 123 // {
|| char_code == 125 // }
) {
return false;
}
}
return true;
}
inline int RoundInt(double x) {
return static_cast<int>(x + 0.5f);
}
template <typename T, std::size_t N = 32>
class AlignmentAllocator {
public:
typedef T value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
inline AlignmentAllocator() throw() {}
template <typename T2>
inline AlignmentAllocator(const AlignmentAllocator<T2, N>&) throw() {}
inline ~AlignmentAllocator() throw() {}
inline pointer adress(reference r) {
return &r;
}
inline const_pointer adress(const_reference r) const {
return &r;
}
inline pointer allocate(size_type n) {
return (pointer)_mm_malloc(n * sizeof(value_type), N);
}
inline void deallocate(pointer p, size_type) {
_mm_free(p);
}
inline void construct(pointer p, const value_type& wert) {
new (p) value_type(wert);
}
inline void destroy(pointer p) {
p->~value_type();
}
inline size_type max_size() const throw() {
return size_type(-1) / sizeof(value_type);
}
template <typename T2>
struct rebind {
typedef AlignmentAllocator<T2, N> other;
};
bool operator!=(const AlignmentAllocator<T, N>& other) const {
return !(*this == other);
}
// Returns true if and only if storage allocated from *this
// can be deallocated from other, and vice versa.
// Always returns true for stateless allocators.
bool operator==(const AlignmentAllocator<T, N>&) const {
return true;
}
};
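/*!
* \brief Illustrative usage sketch for AlignmentAllocator: a std::vector whose
* storage is 32-byte aligned (allocated via _mm_malloc), e.g. for aligned SIMD loads.
* \code
* std::vector<double, AlignmentAllocator<double, 32>> aligned_buf(1024);
* // aligned_buf.data() is aligned to a 32-byte boundary
* \endcode
*/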
class Timer {
public:
Timer() {
#ifdef TIMETAG
int num_threads = OMP_NUM_THREADS();
start_time_.resize(num_threads);
stats_.resize(num_threads);
#endif // TIMETAG
}
~Timer() { Print(); }
#ifdef TIMETAG
void Start(const std::string& name) {
auto tid = omp_get_thread_num();
start_time_[tid][name] = std::chrono::steady_clock::now();
}
void Stop(const std::string& name) {
auto cur_time = std::chrono::steady_clock::now();
auto tid = omp_get_thread_num();
if (stats_[tid].find(name) == stats_[tid].end()) {
stats_[tid][name] = std::chrono::duration<double, std::milli>(0);
}
stats_[tid][name] += cur_time - start_time_[tid][name];
}
#else
void Start(const std::string&) {}
void Stop(const std::string&) {}
#endif // TIMETAG
void Print() const {
#ifdef TIMETAG
std::unordered_map<std::string, std::chrono::duration<double, std::milli>>
stats(stats_[0].begin(), stats_[0].end());
for (size_t i = 1; i < stats_.size(); ++i) {
for (auto it = stats_[i].begin(); it != stats_[i].end(); ++it) {
if (stats.find(it->first) == stats.end()) {
stats[it->first] = it->second;
} else {
stats[it->first] += it->second;
}
}
}
std::map<std::string, std::chrono::duration<double, std::milli>> ordered(
stats.begin(), stats.end());
for (auto it = ordered.begin(); it != ordered.end(); ++it) {
Log::Info("%s costs:\t %f", it->first.c_str(), it->second * 1e-3);
}
#endif // TIMETAG
}
#ifdef TIMETAG
std::vector<
std::unordered_map<std::string, std::chrono::steady_clock::time_point>>
start_time_;
std::vector<std::unordered_map<std::string,
std::chrono::duration<double, std::milli>>>
stats_;
#endif // TIMETAG
};
// Note: this class is not thread-safe, don't use it inside omp blocks
class FunctionTimer {
public:
#ifdef TIMETAG
FunctionTimer(const std::string& name, Timer& timer) : timer_(timer) {
timer.Start(name);
name_ = name;
}
~FunctionTimer() { timer_.Stop(name_); }
private:
std::string name_;
Timer& timer_;
#else
FunctionTimer(const std::string&, Timer&) {}
#endif // TIMETAG
};
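/*!
* \brief Illustrative usage sketch for Timer / FunctionTimer; the section name
* is an assumption. With TIMETAG defined, per-thread durations are accumulated
* and printed when the Timer is destroyed; without it, all calls are no-ops.
* \code
* Timer timer;
* {
* FunctionTimer ft("Tree::Split", timer); // starts the "Tree::Split" clock
* // ... timed work ...
* } // leaving the scope stops the clock
* timer.Print();
* \endcode
*/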
} // namespace Common
extern Common::Timer global_timer;
/*!
* Provides locale-independent alternatives to Common's methods.
* Essential to make models robust to locale settings.
*/
namespace CommonC {
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
return LightGBM::Common::Join(strs, delimiter, true);
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
return LightGBM::Common::Join(strs, start, end, delimiter, true);
}
inline static const char* Atof(const char* p, double* out) {
return LightGBM::Common::Atof(p, out);
}
template<typename T, bool is_float>
struct __StringToTHelperFast {
const char* operator()(const char*p, T* out) const {
return LightGBM::Common::Atoi(p, out);
}
};
/*!
* \warning Beware that ``Common::Atof``, used in ``__StringToTHelperFast``,
* has **less** floating point precision than ``__StringToTHelper``.
* Both versions are kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
* Check ``StringToArrayFast`` and ``StringToArray`` for more details on this.
*/
template<typename T>
struct __StringToTHelperFast<T, true> {
const char* operator()(const char*p, T* out) const {
double tmp = 0.0f;
auto ret = Atof(p, &tmp);
*out = static_cast<T>(tmp);
return ret;
}
};
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
T ret = 0;
LightGBM::Common::Atoi(str.c_str(), &ret);
return ret;
}
};
/*!
* \warning Beware that ``Common::Atof``, used in ``__StringToTHelperFast``,
* has **less** floating point precision than ``__StringToTHelper``.
* Both versions are kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
* Check ``StringToArrayFast`` and ``StringToArray`` for more details on this.
* \note It is possible that ``fast_double_parser::parse_number`` is faster than ``Common::Atof``.
*/
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
double tmp;
// Fast (common) path: For numeric inputs in RFC 7159 format:
const bool fast_parse_succeeded = fast_double_parser::parse_number(str.c_str(), &tmp);
// Rare path: Not in RFC 7159 format. Possible "inf", "nan", etc. Fallback to standard library:
if (!fast_parse_succeeded) {
std::stringstream ss;
Common::C_stringstream(ss);
ss << str;
ss >> tmp;
}
return static_cast<T>(tmp);
}
};
/*!
* \warning Beware that due to internal use of ``Common::Atof`` in ``__StringToTHelperFast``,
* this method has less precision for floating point numbers than ``StringToArray``,
* which calls ``__StringToTHelper``.
* As such, ``StringToArrayFast`` and ``StringToArray`` are not equivalent!
* Both versions were kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
*/
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
auto p_str = str.c_str();
__StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
std::vector<T> ret(n);
for (int i = 0; i < n; ++i) {
p_str = helper(p_str, &ret[i]);
}
return ret;
}
/*!
* \warning Do not replace calls to this method by ``StringToArrayFast``.
* This method is more precise for floating point numbers.
* Check ``StringToArrayFast`` for more details.
*/
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), ' ');
CHECK_EQ(strs.size(), static_cast<size_t>(n));
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
/*!
* \warning Do not replace calls to this method by ``StringToArrayFast``.
* This method is more precise for floating point numbers.
* Check ``StringToArrayFast`` for more details.
*/
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
#if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
/*!
* Safely formats a value onto a buffer according to a format string and null-terminates it.
*
* \note It checks that the full value was written or forcefully aborts.
* This safety check serves to prevent incorrect internal API usage.
* Correct usage will never run into this problem:
* - The received buffer size shall be sufficient at all times for the input format string and value.
*/
template <typename T>
inline static void format_to_buf(char* buffer, const size_t buf_len, const char* format, const T value) {
auto result = fmt::format_to_n(buffer, buf_len, format, value);
if (result.size >= buf_len) {
Log::Fatal("Numerical conversion failed. Buffer is too small.");
}
buffer[result.size] = '\0';
}
template<typename T, bool is_float, bool high_precision>
struct __TToStringHelper {
void operator()(T value, char* buffer, size_t buf_len) const {
format_to_buf(buffer, buf_len, "{}", value);
}
};
template<typename T>
struct __TToStringHelper<T, true, false> {
void operator()(T value, char* buffer, size_t buf_len) const {
format_to_buf(buffer, buf_len, "{:g}", value);
}
};
template<typename T>
struct __TToStringHelper<T, true, true> {
void operator()(T value, char* buffer, size_t buf_len) const {
format_to_buf(buffer, buf_len, "{:.17g}", value);
}
};
/*!
* Converts an array to a string with values separated by the space character.
* This method replaces Common's ``ArrayToString`` and ``ArrayToStringFast`` functionality
* and is locale-independent.
*
* \note If ``high_precision_output`` is set to true,
* floating point values are output with more digits of precision.
*/
template<bool high_precision_output = false, typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
__TToStringHelper<T, std::is_floating_point<T>::value, high_precision_output> helper;
const size_t buf_len = high_precision_output ? 32 : 16;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
Common::C_stringstream(str_buf);
helper(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
helper(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
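/*!
* \brief Illustrative usage sketch for ArrayToString; the values are
* assumptions. With high_precision_output set to true, floating point values
* are written with "{:.17g}", enough digits to round-trip a double.
* \code
* std::vector<double> thresholds = {0.1, 1.0 / 3.0};
* std::string compact = ArrayToString(thresholds, thresholds.size());
* std::string precise = ArrayToString<true>(thresholds, thresholds.size());
* \endcode
*/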
#endif // (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
} // namespace CommonC
} // namespace LightGBM
#endif // LIGHTGBM_UTILS_COMMON_H_
|
matmul-parallel.c |
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
// Initialize matrices
void initialize_matrices(float *a, float *b, float *c, unsigned size,
unsigned seed) {
srand(seed);
for (int i = 0; i < size; ++i) {
for (int j = 0; j < size; ++j) {
a[i * size + j] = rand() % 10;
b[i * size + j] = rand() % 10;
c[i * size + j] = 0.0f;
}
}
}
// Parallelize this function using OpenMP
void multiply(float *a, float *b, float *c, unsigned size) {
float sum;
int i, j, k;
// sum holds a single dot product at a time, so it must be private to each
// thread; a reduction would wrongly imply accumulation across iterations.
#pragma omp parallel for default(none) private(i, j, k, sum) \
shared(a, b, c, size)
for (i = 0; i < (int)size; ++i) {
for (j = 0; j < (int)size; ++j) {
sum = 0.0f;
for (k = 0; k < (int)size; ++k) {
sum = sum + a[i * size + k] * b[k * size + j];
}
c[i * size + j] = sum;
}
}
}
// Output matrix to stdout
void print_matrix(float *c, unsigned size) {
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
printf(" %5.1f", c[i * size + j]);
}
printf("\n");
}
}
int main(int argc, char *argv[]) {
float *a, *b, *c;
unsigned seed, size;
double t;
FILE *input;
if (argc < 2) {
fprintf(stderr, "Error: missing path to input file\n");
return 1;
}
if ((input = fopen(argv[1], "r")) == NULL) {
fprintf(stderr, "Error: could not open file\n");
return 1;
}
// Read inputs
fscanf(input, "%u", &size);
fscanf(input, "%u", &seed);
// Do not change this line
omp_set_num_threads(4);
// Allocate matrices
a = (float *)malloc(sizeof(float) * size * size);
b = (float *)malloc(sizeof(float) * size * size);
c = (float *)malloc(sizeof(float) * size * size);
// initialize_matrices with random data
initialize_matrices(a, b, c, size, seed);
// Multiply matrices
t = omp_get_wtime();
multiply(a, b, c, size);
t = omp_get_wtime() - t;
// Show result
print_matrix(c, size);
// Output elapsed time
fprintf(stderr, "%lf\n", t);
// Release memory
free(a);
free(b);
free(c);
return EXIT_SUCCESS;
}
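// Illustrative example (the file and binary names are assumptions): the input
// file read above holds two unsigned integers, the matrix size followed by the
// RNG seed, e.g. a file "input.txt" containing:
//   8 42
// which would be run as: ./matmul-parallel input.txt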
|
Vector.h | /*!
* @file Vector.h
* @author Michal Merta
* @author Jan Zapletal
* @date July 7, 2013
* @brief Header file for class Vector
*
*/
#ifndef VECTOR_H2
#define VECTOR_H2
#include <cstring>
#include <complex>
#include <iostream>
#include <fstream>
#include <type_traits>
#include "Macros.h"
#include "Offloadable.h"
#include "BLAS_LAPACK_wrapper.h"
namespace bem4i {
// forward declaration
template<class LO2, class SC2>
class FullMatrix;
template<class LO3, class SC3>
class IdentityOperator;
/*!
* Class representing a full vector
*
* Vector operations are performed by calling BLAS/LAPACK routines.
*
*/
template<class LO, class SC>
class Vector : public Offloadable {
typedef typename GetType<LO, SC>::SCVT SCVT;
//template<class LO2, class SCVT>
friend class FullMatrix<LO, SC>;
//template<class LO3, class SC3>
friend class IdentityOperator<LO, SC>;
public:
//! default constructor
Vector( );
//! copy constructor
Vector(
const Vector& orig
);
//! constructor allocating a vector of length length
Vector(
LO length,
bool zeroOut = true
);
//! constructor taking a user preallocated array to store vector entries
Vector(
LO length,
SC * data,
bool copy = false
);
//! destructor
virtual ~Vector( );
//! returns the length of the vector
inline LO getLength( ) const {
return length;
}
void copy(
Vector<LO, SC> & copy
) const;
void copyToConjugate(
Vector<LO, SC> & copy
) const;
//! returns an element idx of a vector
inline SC get(
LO idx
) const {
return data[idx];
}
//! returns pointer to data
inline SC* getData( ) const {
return data;
}
inline void setDeleteData(
bool deleteData
) {
this->deleteData = deleteData;
}
void setData(
LO length,
SC * data,
bool copy = false
) {
this->length = length;
if ( copy ) {
if ( this->data && this->deleteData ) {
delete [ ] this->data;
}
this->data = new SC[ length ];
memcpy( this->data, data, length * sizeof ( SC ) );
deleteData = true;
} else {
// free previously owned storage before adopting the external pointer
if ( this->data && this->deleteData ) {
delete [ ] this->data;
}
this->data = data;
deleteData = false;
}
}
void append(
Vector< LO, SC > & x
) {
SC * tmp = new SC[ this->length + x.length ];
memcpy( tmp, this->data, this->length * sizeof ( SC ) );
memcpy( tmp + this->length, x.data, x.length * sizeof ( SC ) );
if ( this->data && this->deleteData ) {
delete [ ] this->data;
}
this->data = tmp;
this->length += x.length;
this->deleteData = true;
}
//! returns real part of the vector
void real(
Vector< LO, SCVT > & real
) const;
//! returns imag part of the vector
void imag(
Vector< LO, SCVT > & imag
) const;
//! sets an element idx of a vector to value val
inline void set(
LO idx,
SC val
) {
data[idx] = val;
}
//! sets all elements of a vector to a value val
inline void setAll( SC val ) {
SC *p = data, *last = data + length;
while ( p != last ) *( p++ ) = val;
}
inline SC sum( ) {
SC sum = 0.0;
for ( LO i = 0; i < this->length; i++ ) {
sum += this->data[ i ];
}
return sum;
}
//! scales all values of a vector by alpha
void scale( SC alpha );
//! performs in-place conjugation
void conjugate( );
//! elementwise relative error
//void errorRelative( Vector<LO,SC> const & sol );
void resize( LO length, bool zero = true );
//! computes 2-norm of a vector
SCVT norm2( ) const;
LO findAbsMax( ) const;
//! computes a dot product of vector this and vector a
SC dot(
Vector<LO, SC> const & a
) const;
/*!
* @brief computes cross product this \times b
*
* Computes the cross product of this vector and b and stores the result in ret
* @param b second operand, size = nB * sizeof this
* @param ret preallocated return vector
* @param nB optional argument specifying number of inputs stored in b
*/
void cross(
Vector<LO, SC> const & b,
Vector<LO, SC> & ret,
LO nB = 1
) const;
/*!
* @brief Vector-vector addition
*
* Computes a sum this = this + alpha*x
* @param x
* @param alpha
*/
void add(
Vector<LO, SC> const & x,
SC alpha = 1.0
);
/*!
* @brief Vector-vector addition
*
* Computes a sum ret = this + alpha*x
* @param x not overwritten
* @param ret preallocated vector
* @param alpha
*/
void add(
Vector<LO, SC> const &x,
Vector<LO, SC> &ret,
SC alpha = 1.0
);
void add( LO i, SC val ) {
this->data[i] += val;
}
void addAtomic(
LO i,
SC val
) {
#pragma omp atomic update
this->data[ i ] += val;
}
void add( SC val ) {
for ( LO i = 0; i < this->length; ++i ) {
this->data[i] += val;
}
}
//! prints the vector
void print( std::ostream &stream = std::cout ) const;
void load(
const std::string & fileName
);
void xferToMIC(
int device = 0
) const {
#if N_MIC > 0
const SCVT * data = reinterpret_cast < SCVT * > ( this->data );
int mult = 1;
if ( !std::is_same< SC, SCVT >::value ) mult = 2;
#pragma omp target enter data device( device ) \
map( to : data[ 0 : mult * length ] )
#endif
}
void xferToHost(
int device = 0
) const {
#if N_MIC > 0
const SCVT * data = reinterpret_cast < SCVT * > ( this->data );
int mult = 1;
if ( !std::is_same< SC, SCVT >::value ) mult = 2;
#pragma omp target update device( device ) \
from( data[ 0 : mult * length ] )
#endif
}
void updateMIC(
int device = 0
) const {
#if N_MIC > 0
const SCVT * data = reinterpret_cast < SCVT * > ( this->data );
int mult = 1;
if ( !std::is_same< SC, SCVT >::value ) mult = 2;
#pragma omp target update device( device ) \
to( data[ 0 : mult * length ] )
#endif
}
void deleteMIC(
int device = 0
) const {
#if N_MIC > 0
const SCVT * data = reinterpret_cast < SCVT * > ( this->data );
#pragma omp target exit data device( device ) \
map( delete : data )
#endif
}
private:
//! number of vector elements
LO length;
//! array of vector data
SC *data;
//! determines whether the destructor deletes the data
bool deleteData;
};
}
// include .cpp file to overcome linking problems due to templates
#include "Vector.cpp"
#endif /* VECTOR_H2 */
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether an "incomplete type" error was given for the type. */
#define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For a PARM_DECL, nonzero if it was declared as an array. */
#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already
been folded. */
#define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP))
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* The value of the expression. */
tree value;
/* Record the original unary/binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
(even if parenthesized), for subexpressions, and for non-constant
initializers, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
/* If not NULL, the original type of an expression. This will
differ from the type of the value field for an enum constant.
The type of an enum constant is a plain integer type, but this
field will be the enum type. */
tree original_type;
/* The source range of this expression. This is redundant
for node values that have locations, but not all node kinds
have locations (e.g. constants, and references to params, locals,
etc), so we stash a copy here. */
source_range src_range;
/* Access to the first and last locations within the source spelling
of this expression. */
location_t get_start () const { return src_range.m_start; }
location_t get_finish () const { return src_range.m_finish; }
location_t get_location () const
{
if (EXPR_HAS_LOCATION (value))
return EXPR_LOCATION (value);
else
return make_location (get_start (), get_start (), get_finish ());
}
/* Set the value to error_mark_node whilst ensuring that src_range
is initialized. */
void set_error ()
{
value = error_mark_node;
src_range.m_start = UNKNOWN_LOCATION;
src_range.m_finish = UNKNOWN_LOCATION;
}
};
/* Type alias for struct c_expr. This allows the structure to be used
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspec. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier, or _Atomic ( type-name ). */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* Whether the expression has operands suitable for use in constant
expressions. */
bool expr_const_operands;
/* The specifier itself. */
tree spec;
/* An expression to be evaluated before the type specifier, in the
case of typeof specifiers, or NULL otherwise or if no such
expression is required for a particular typeof specifier. In
particular, when typeof is applied to an expression of variably
modified type, that expression must be evaluated in order to
determine array sizes that form part of the type, but the
expression itself (as opposed to the array sizes) forms no part
of the type and so needs to be recorded separately. */
tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int_n,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_floatn_nx,
cts_fract,
cts_accum,
cts_auto_type
};
/* This enum lists all the possible declarator specifiers, storage
class or attribute that a user can write. There is at least one
enumerator per possible declarator specifier in the struct
c_declspecs below.
It is used to index the array of declspec locations in struct
c_declspecs. */
enum c_declspec_word {
cdw_typespec /* A catch-all for a typespec. */,
cdw_storage_class /* A catch-all for a storage class */,
cdw_attributes,
cdw_typedef,
cdw_explicit_signed,
cdw_deprecated,
cdw_default_int,
cdw_long,
cdw_long_long,
cdw_short,
cdw_signed,
cdw_unsigned,
cdw_complex,
cdw_inline,
cdw_noreturn,
cdw_thread,
cdw_const,
cdw_volatile,
cdw_restrict,
cdw_atomic,
cdw_saturating,
cdw_alignas,
cdw_address_space,
cdw_gimple,
cdw_rtl,
cdw_number_of_elements /* This one must always be the last
enumerator. */
};
/* A sequence of declaration specifiers in C. When a new declaration
specifier is added, please update the enum c_declspec_word above
accordingly. */
struct c_declspecs {
source_location locations[cdw_number_of_elements];
/* The type specified, if a single type specifier such as a struct,
union or enum specifier, typedef name or typeof specifies the
whole type, or NULL_TREE if none or a keyword such as "void" or
"char" is used. Does not include qualifiers. */
tree type;
/* Any expression to be evaluated before the type, from a typeof
specifier. */
tree expr;
/* The attributes from a typedef decl. */
tree decl_attr;
/* When parsing, the attributes. Outside the parser, this will be
NULL; attributes (possibly from multiple lists) will be passed
separately. */
tree attrs;
/* The pass to start compiling a __GIMPLE or __RTL function with. */
char *gimple_or_rtl_pass;
/* The base-2 log of the greatest alignment required by an _Alignas
specifier, in bytes, or -1 if no such specifiers with nonzero
alignment. */
int align_log;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* For the _FloatN and _FloatNx declspec, this stores the index into
the floatn_nx_types array. */
int floatn_nx_idx;
/* The storage class specifier, or csc_none if none. */
enum c_storage_class storage_class;
/* Any type specifier keyword used such as "int", not reflecting
modifiers such as "short", or cts_none if none. */
ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
/* The kind of type specifier if one has been seen, ctsk_none
otherwise. */
ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
/* Whether any expressions in typeof specifiers may appear in
constant expressions. */
BOOL_BITFIELD expr_const_operands : 1;
/* Whether any declaration specifiers have been seen at all. */
BOOL_BITFIELD declspecs_seen_p : 1;
/* Whether something other than a storage class specifier or
attribute has been seen. This is used to warn for the
obsolescent usage of storage class specifiers other than at the
start of the list. (Doing this properly would require function
specifiers to be handled separately from storage class
specifiers.) */
BOOL_BITFIELD non_sc_seen_p : 1;
/* Whether the type is specified by a typedef or typeof name. */
BOOL_BITFIELD typedef_p : 1;
/* Whether the type is explicitly "signed" or specified by a typedef
whose type is explicitly "signed". */
BOOL_BITFIELD explicit_signed_p : 1;
/* Whether the specifiers include a deprecated typedef. */
BOOL_BITFIELD deprecated_p : 1;
/* Whether the type defaulted to "int" because there were no type
specifiers. */
BOOL_BITFIELD default_int_p : 1;
/* Whether "long" was specified. */
BOOL_BITFIELD long_p : 1;
/* Whether "long" was specified more than once. */
BOOL_BITFIELD long_long_p : 1;
/* Whether "short" was specified. */
BOOL_BITFIELD short_p : 1;
/* Whether "signed" was specified. */
BOOL_BITFIELD signed_p : 1;
/* Whether "unsigned" was specified. */
BOOL_BITFIELD unsigned_p : 1;
/* Whether "complex" was specified. */
BOOL_BITFIELD complex_p : 1;
/* Whether "inline" was specified. */
BOOL_BITFIELD inline_p : 1;
/* Whether "_Noreturn" was speciied. */
BOOL_BITFIELD noreturn_p : 1;
/* Whether "__thread" or "_Thread_local" was specified. */
BOOL_BITFIELD thread_p : 1;
/* Whether "__thread" rather than "_Thread_local" was specified. */
BOOL_BITFIELD thread_gnu_p : 1;
/* Whether "const" was specified. */
BOOL_BITFIELD const_p : 1;
/* Whether "volatile" was specified. */
BOOL_BITFIELD volatile_p : 1;
/* Whether "restrict" was specified. */
BOOL_BITFIELD restrict_p : 1;
/* Whether "_Atomic" was specified. */
BOOL_BITFIELD atomic_p : 1;
/* Whether "_Sat" was specified. */
BOOL_BITFIELD saturating_p : 1;
/* Whether any alignment specifier (even with zero alignment) was
specified. */
BOOL_BITFIELD alignas_p : 1;
/* Whether any __GIMPLE specifier was specified. */
BOOL_BITFIELD gimple_p : 1;
/* Whether any __RTL specifier was specified. */
BOOL_BITFIELD rtl_p : 1;
/* The address space that the declaration belongs to. */
addr_space_t address_space;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* Parenthesized declarator with nested attributes. */
cdk_attrs
};
struct c_arg_tag {
/* The argument name. */
tree id;
/* The type of the argument. */
tree type;
};
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A compound expression of VLA sizes from the parameters, or NULL.
In a function definition, these are used to ensure that
side-effects in sizes of arrays converted to pointers (such as a
parameter int i[n++]) take place; otherwise, they are
ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
union {
/* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
declarator. */
tree id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The attributes. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
/* The location of the parameter. */
location_t loc;
};
/* Used when parsing an enum. Initialized by start_enum. */
struct c_enum_contents
{
/* While defining an enum type, this is 1 plus the last enumerator
constant value. */
tree enum_next_value;
/* Nonzero means that there was overflow computing enum_next_value. */
int enum_overflow;
};
/* A type of reference to a static identifier in an inline
function. */
enum c_inline_static_type {
/* Identifier with internal linkage used in function that may be an
inline definition (i.e., file-scope static). */
csi_internal,
/* Modifiable object with static storage duration defined in
function that may be an inline definition (i.e., local
static). */
csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
extern bool c_keyword_starts_typename (enum rid keyword);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern tree pushdecl (tree);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern bool start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
extern void temp_pop_parm_decls (void);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *, location_t);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern location_t c_last_sizeof_loc;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (location_t, tree);
extern bool same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree, bool = false);
extern void c_incomplete_type_error (location_t, const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
bool, bool);
extern tree decl_constant_value_1 (tree, bool);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree, location_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, bool, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
location_t, tree, tree, location_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void maybe_warn_string_init (location_t, tree, struct c_expr);
extern void start_init (tree, tree, int, rich_location *);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (location_t, struct obstack *);
extern void push_init_level (location_t, int, struct obstack *);
extern struct c_expr pop_init_level (location_t, int, struct obstack *,
location_t);
extern void set_init_index (location_t, tree, tree, struct obstack *);
extern void set_init_label (location_t, tree, location_t, struct obstack *);
extern void process_init_element (location_t, struct c_expr, bool,
struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool,
unsigned int);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree, bool);
extern void c_finish_case (tree, tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool,
bool);
extern tree build_asm_stmt (bool, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree);
extern tree c_finish_oacc_data (location_t, tree, tree);
extern tree c_finish_oacc_host_data (location_t, tree, tree);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree, enum c_omp_region_type);
extern tree c_build_va_arg (location_t, tree, location_t, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *);
extern tree c_omp_clause_copy_ctor (tree, tree, tree);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* In c-decl.c */
/* Tell the binding oracle what kind of binding we are looking for. */
enum c_oracle_request
{
C_ORACLE_SYMBOL,
C_ORACLE_TAG,
C_ORACLE_LABEL
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier);
extern c_binding_oracle_function *c_binding_oracle;
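/* Illustrative sketch (not part of this header): a client embedding the
   C frontend, such as gdb's "compile" feature, installs an oracle roughly as

       static void
       my_oracle (enum c_oracle_request request, tree identifier)
       {
         if (request == C_ORACLE_SYMBOL)
           {
             // Look IDENTIFIER up in the client's own symbol table and,
             // if found, build a decl and call pushdecl (); otherwise do
             // nothing and the name simply stays unbound.
           }
       }

       c_binding_oracle = my_oracle;

   c-decl.c then consults the oracle at most once per unresolved name. */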
extern void c_finish_incomplete_decl (tree);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
extern void c_pushtag (location_t, tree, tree);
extern void c_bind (location_t, tree, bool);
extern bool tag_exists_p (enum tree_code, tree);
/* In c-errors.c */
extern bool pedwarn_c90 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c99 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern void
set_c_expr_source_range (c_expr *expr,
location_t start, location_t finish);
extern void
set_c_expr_source_range (c_expr *expr,
source_range src_range);
/* In c-fold.c */
extern vec<tree> incomplete_record_decls;
#if CHECKING_P
namespace selftest {
extern void run_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */
#endif /* ! GCC_C_TREE_H */
|
forces.c | #include <stdint.h>
#include <omp.h>
#include "geometry.h"
#include "core_kernel.h"
#include "bench.h"
#include "phy.h"
static void
_KRN_ComputeForces(
const size_t snfc,
const uint32_t bsz,
const uint32_t *snfic,
const uint32_t *fptrn0,
const uint32_t *fptrn1,
const uint32_t *fptrn2,
const double* xyzx0,
const double* xyzx1,
const double* xyzx2,
const double *q,
double forces[])
{
double lift = 0.0;
double drag = 0.0;
double momn = 0.0;
uint32_t i;
for(i = 0; i < snfc; i++)
{
uint32_t if0 = snfic[i];
uint32_t if1 = snfic[i+1];
uint32_t j;
#pragma omp parallel for reduction(+: lift, drag, momn)
for(j = if0; j < if1; j++)
{
uint32_t n0 = fptrn0[j];
uint32_t n1 = fptrn1[j];
uint32_t n2 = fptrn2[j];
double x0 = xyzx0[n0];
double y0 = xyzx1[n0];
double z0 = xyzx2[n0];
double x1 = xyzx0[n1];
double y1 = xyzx1[n1];
double z1 = xyzx2[n1];
double x2 = xyzx0[n2];
double y2 = xyzx1[n2];
double z2 = xyzx2[n2];
/* Delta coordinates in all directions */
double ax = x1 - x0;
double ay = y1 - y0;
double az = z1 - z0;
double bx = x2 - x0;
double by = y2 - y0;
double bz = z2 - z0;
/*
Norm points outward, away from grid interior.
Norm magnitude is area of surface triangle.
*/
double xnorm = -0.5 * (ay * bz - az * by);
double ynorm = 0.5 * (ax * bz - az * bx);
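/* The z-component of the normal is never needed: the lift, drag and
moment accumulations below use only the x and y components. */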
/* Pressure values are stored at every face node */
double p0 = q[bsz * n0];
double p1 = q[bsz * n1];
double p2 = q[bsz * n2];
double press = (p0 + p1 + p2) / 3.0;
double cp = 2.0 * (press - 1.0);
double dcx = cp * xnorm;
double dcy = cp * ynorm;
double xmid = x0 + x1 + x2;
double ymid = y0 + y1 + y2;
lift = lift - dcx * V + dcy * U;
drag = drag + dcx * U + dcy * V;
momn = momn + (xmid - 0.25) * dcy - ymid * dcx;
}
}
forces[0] = lift;
forces[1] = drag;
forces[2] = momn;
}
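/*
 * Minimal usage sketch, illustrative only and not part of the original
 * benchmark: a single surface triangle held at freestream pressure (q = 1),
 * so cp = 2*(press - 1) = 0 and all three coefficients come out zero. The
 * FORCES_EXAMPLE_MAIN guard is hypothetical; U and V are assumed to be
 * provided by phy.h, exactly as in the kernel above.
 */
#ifdef FORCES_EXAMPLE_MAIN
#include <stdio.h>
int main(void)
{
    const uint32_t snfic[2] = { 0u, 1u };   /* one face group holding faces [0,1) */
    const uint32_t n0[1] = { 0u };
    const uint32_t n1[1] = { 1u };
    const uint32_t n2[1] = { 2u };
    const double x[3] = { 0.0, 1.0, 0.0 };  /* unit right triangle in the x-y plane */
    const double y[3] = { 0.0, 0.0, 1.0 };
    const double z[3] = { 0.0, 0.0, 0.0 };
    const double q[3] = { 1.0, 1.0, 1.0 };  /* one pressure value per node (bsz = 1) */
    double forces[3];
    _KRN_ComputeForces(1, 1u, snfic, n0, n1, n2, x, y, z, q, forces);
    printf("lift=%g drag=%g moment=%g\n", forces[0], forces[1], forces[2]);
    return 0;
}
#endif /* FORCES_EXAMPLE_MAIN */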
void
ComputeForces(const GEOMETRY *g, double forces[])
{
//BENCH start_bench = rdbench();
_KRN_ComputeForces(
g->t->sz,
g->c->b,
g->t->i,
g->b->fc->fptr->n0,
g->b->fc->fptr->n1,
g->b->fc->fptr->n2,
g->n->xyz->x0,
g->n->xyz->x1,
g->n->xyz->x2,
g->q->q,
forces);
//fun3d_log(start_bench, KERNEL_FORCES);
} |
GB_unaryop__minv_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_fp32
// op(A') function: GB_tran__minv_bool_fp32
// C type: bool
// A type: float
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
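// Expansion sketch for this particular operator (added for readability; not
// generated output): GB_GETA and GB_CASTING are no-ops here and GB_OP ignores
// x, so GB_CAST_OP (p, p) reduces to simply
//
//      Cx [p] = true ;
//
// because the multiplicative inverse minv(x) = 1/x is identically true over
// booleans.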
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_bool_fp32
(
bool *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_bool_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
odf_fmt_plug.c | /* ODF cracker patch for JtR. Hacked together during Summer of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_odf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_odf);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sha.h"
#include "sha2.h"
#include <openssl/blowfish.h>
#include "aes.h"
#include "pbkdf2_hmac_sha1.h"
#include "memdbg.h"
#define FORMAT_LABEL "ODF"
#define FORMAT_TAG "$odf$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "SHA1/SHA256 " SHA1_ALGORITHM_NAME " BF/AES"
#else
#define ALGORITHM_NAME "SHA1/SHA256 BF/AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests odf_tests[] = {
{"$odf$*0*0*1024*16*df6c10f64d191a841812af53874b636d014ce3fe*8*07e28aff39d2660e*16*b124be9f3346fb77e0ebcc3bb80028f8*0*2276a1077f6a2a027bd565ce89824d6a20086e378876be05c4b8e3796a460e828c9803a692caf7a53492c220d1d7ecbf4e2d336c7abf5a7672acc804ca267318252cbc13676616d1fde38820f9fbeef1360067d9de096ba8c1032ae947bde1d0fedaf37b6020663d49faf36b7c095c5b9aae11c8fc2be74148f008edbdbb180b44028ad8259f1215b483542bf3027f56dee5f962448333b30f88e6ae4790b60d24abb286edff9adee831a4b3351fc47259043f0d683d7a25be7e47aff3aedca140005d866e218c8efcca32093c19bbece50bd96656d0f94a712d3c60d1e5342db86482fc73f05faf513ca0b137378126597b95986c372b412c953e97011259aab0839fe453c756559497a28ba88dce009e1e7980436131029d38e56a34f608e6471970d9959068808c898608024db9eb394c4feae7a364ea9272ec4ea2315a9f0407a4b27d5e49a8ab1e3ddce5c84927d5aecd7e68e4437a820ea8743c6b5b4e2abbb47b0001e2f77ceac4603e8774e4ccbc1adde794428c11ae4a7492727b620334302e63f72b0c06c1cf83800366916ee8295176819272d557863a831ee0a576841191482959aad69095831fa1d64e3e0e6f6c6a751bcdadf0fbaa27a17458709f708c04587cb208984c9525da6786e0e5aabefe30ad1dbbef66e85ce9d6dbe456fd85e4135de5cf16d9455976d7ca8de7b1b530661c74c0fae90c0fff1a2b5fcdfab19fcff75fadcec445ed8af6ab5babf1463e08458918be8045083de6db988c37e4be582cfac5cdf741d1f0322fb2902665c7ff347813348109e5d442e91fcb010c28f042da481e807084fcb4759b40ccf2cae77bad00cdfbfba4acf36aa1f74c30a315e3d7f1ca522b6306e8903352aafa51dc523d582d418934398d5eb88120e3656bfb640a239db507b285302a86855ea850ddc9af72fc62dc79336c9bc29ee8314c65adb0574e9c701d73d7fa977edd1d52a1ff2da5b8b94e1a0fdd01ffcc6583758f0a1f51750e45f12b58c6d38b140e5676cf3474224520ef7c52ca5e634f85456651f3d6f43d016ed7cc5da54ea640a3bc50c2b9d3dea8f93c0340d66ccd06efc5ae002108c33cf3a470c4a50f6a6ca2f11b8ad15511688c282b94ba6f1c332e239d10946dc46f763f08d12cb9edc1e79c0e07f7151f548e6d7d20ec13b52d911bf980cac60694e192651403c9a69abea045190e847be093fc9ba43fec55b32f77f5796ddca25b441f259d5c51e06df6c6588c6414899481ba9e06bcebec58f82ff3021b09c6beae13a5d22bc94870f72ab813d0c0be01d91f3d075192e7a5de765599d72244757d09539529a8347e077a36678166e5ed9f73a5aad2e147d8154095c397e3e5e4ba1987ca64c1301a0c6c3e438097ede9b701a105ec38fcb54abb31b367c7740cd9ac459e561094a34f01acee555e60267157e6", "test"},
{"$odf$*1*1*1024*32*61802eba18eab842de1d053809ba40927fd40b26c69ddeca6a8a652ed9c16a28*16*c5c0815b931f313627100d592a9c972f*16*e9a48b7daff738deaabe442007fb2ec4*0*be3b65ea09642c2b4fdc23e553e1f5304bc5df222b624c6373d53e674f5df01fdb8873cdab7a5a685fa45ad5441a9d8869401b7fa076c488ad53fd9971e97244ecc9416484450d4fb2ee4ec08af4044d7def937e6545dea2ce36bd5c57b1f46b11b9cf90c8fb3accff149ce2d54820b181b9124db9aac131f6436d77cf716423f04d42438eed6f9ca14bd24b9b17d3478176addd5fa0254bf986fccd879e326485790e28b94ad5306868734b5ac1b1ddb3f876382dee6e9428e8230e84bf11b7e85ccbae8b4b424cd73160c380f874b37fbe3c7e88c13ef4bde74b56507d17095c2c32bb8bcded0637e4403107bb33252f72f5886a91b7720fe32a8659a09c217717e4c74a7c2e09fc40b46aa288309a36e86b9f1856e1bce176bc9690555431e05c7b67ff95df64f8f40053079bfc9dda021ab2714fecf74398b867ebef675958f29eaa15eb631845e358a0c5caff0b824a2a69a6eabee069d3d6236d77709fd60438c9e3ad9e42b26810375e1e587eff105ac295327ef8bf66f6462388b7727ec32d6abde2f8d6126b185124bb437753663f6ab1f321ddfdb36d9f1f528729492e0b1bb8d3b9eda3c86c1997c92b902f5160f77587c37e45b5c133b5d9709fea910a2e9b54c0960b0ebc870cdbb858aabe07ed27cba86d29a7e64c6e3863131859314a14e64c1168d4a2d5ca0697853fb1fe969ba968e31359881d51edce287eff415de8e60cec2068bb82157fbcf0cf9a95e92cb23f32e6156daced4bee6ba8c8b41174d01fcd7662911bcc10d5b4478f8209ce3b91075d10529780be4f17e841a1f1833d432c3dc854908643e58b03c8860dfbc710a29f79f75ea262cfcef9cd67fb67d73f55b300d42f4577445af2b9f224620204cfb88de2cbf57931ac0e0f8d98259a41d744cad6a58abc7761c266f4e93aca19356b07073c09ae9d1976f4f2e1a76c350cc7764c27ae257eb69ba4213dd0a7794fa83d220439a398efd988b6dbf0de4c08bc3e4830c9e482b9e0fd1679f14e6f132cf06bae1d763dde7ce6f525ff9a0ebad28aeca16496194f2a6263a20e7afeb43d83c8c936130d6508f2bf68b5ca50375948424193a7fb1106fdf63ff72896e1b2633907f01a693218e3303436542bcf2af24cc4a41621c36768ce9a84d32cc9f3c2b108bfc78c25b1c2ea94e6e0d65406f78bdb8bc33c94a9550e5cc3e995cfbd31da03afb929418acdc89b099415f9bdb7dab7a75d44a696e14b031d601ad8d907e14a28044706c0c2955df2cb34ffea82af367e487b6cc928dc87a33fc7555173e7faa5cfd1af6d3d6f496f23a9579db22dd4a2c16e950fdc90696d95a81183765a4fbddb42c488d40ac1de28483cf1cdddf821d3f859c57b13cb7f21a916bd0d89438a17634c68637f23e2544589e8ae5ee5bced91680c087cb3105cd74a09e88d3aae17d75e", "test"},
{"$odf$*0*0*1024*16*43d3dbd907785c4fa5282a2e73a5914db3372505*8*b3d676d4519e6b5a*16*34e3f7fdfa67fb0078360b0df4011270*0*7eff7a7abf1e6b0c4a9fafe6bdcfcfeaa5b1886592a52bd255f1b51096973d6fa50d792c695f3ef82c6232ae7f89c771e27db658258ad029e82415962b270d2c859b0a3efb231a0519ec1c807082638a9fad7537dec22e20d59f2bfadfa84dd941d59dd07678f9e60ffcc1eb27d8a2ae47b616618e5e80e27309cd027724355bf78b03d5432499c1d2a91d9c67155b7f49e61bd8405e75420d0cfb9e64b238623a9d8ceb47a3fdb5e7495439bb96e79882b850a0c8d3c0fbef5e6d425ae359172b9a82ec0566c3578a9f07b86a70d75b5ad339569c1c8f588143948d63bdf88d6ed2e751ac07f25ecc5778dc06247e5a9edca869ee3335e5dae351666a618d00ec05a35bc73d330bef12a46fb53b2ff96e1b2919af4e692730b9c9664aca761df10d6cf55396c4d4c268e6e96c96515c527c8fe2716ac7a9f016941aa46e6b03e8a5069c29ec8e8614b7da3e2e154a77510393051a0b693ae40da6afb5712a4ce4ac0ebacda1f45bdccc8a7b21e153d1471665cae3205fbfa00129bf00c06777bfecba2c43a1481a00111b4f0bd30c2378bd1e2e219700406411c6f897a3dfa51b31613cb241d56b68f3c241428783b353be26fa8b2df68ca215d1cf892c10fdef94faf2381a13f8cb2bce1a7dbb7522ef0b2a83e5a96ca66417fd2928784054e80d74515c1582ad356dd865837b5ea90674a30286a72a715f621c9226f19a321b413543fbbdb7cd9d1f99668b19951304e7267554d87992fbf9a96116601d0cee9e23cb22ba474c3f721434400cacf15bae05bbe9fa17f69967d03689c48a26fa57ff9676c96767762f2661b6c8f8afa4f96f989086aa02b6f8d039c6f4d158cc33a56cbf77640fb5087b2d5a5251692bb9255d0ae8148c7157c40031fdb0ea90d5fab546a7e1e1c15bd6a27f3716776c8a3fdbdd4f34c19fef22c36117c124876606b1395bf96266d647aaf5208eefd729a42a4efe42367475315a979fb74dcb9cd30917a811ed8283f2b111bb5a5d2b0f5589b3652f17d23e352e1494f231027bb93209e3c6a0388f8b2214577dca8aa9d705758aa334d6947491488770ed8066f692f8922ff0d852c2d0f965ab3d8a13c6de0ef3cff5a15ee7b64f9b1003817f0cb919ad021d5f3b0b5c1ad58db22e8fbd63abfb40e61065bad008cdffbbe3c563780a548f4515df5c935d9aa2a3033bc8a4011c9c173a0366c9b7b07f2a27de0e55373fb4b0c7726997be6f410a2ee5980393ea005516e89538be796131e450403420d72cdbd75475fd11c50efce5eb340d55d2dd0a67ca45ddb53aa582a2ec56b46452e26a505bf730998513837c96a121e4ad13af5030392ff7fb660955e03f65894733862f2367d529f0e8cdb73272b9ce01491747cb3e1a22f5c85ab6d40ddd35d15b9d46d73600e0971da90f93cb0e9be357c4f1227fbf5b123e5b", "jumper9"},
{"$odf$*0*0*1024*16*4ec0370ab589f943131240e407a35b58a341e052*8*19cadc01889f78c0*16*dcfcb8baccda277764e4e99833ab9640*0*a7bd859d68298fbdc36b6b51eb06f7055befe08f76ca9833c6e298db8ed971bfd1315065a19e1b31b8a93624757a2583816f35d6f251ff7943be626b3dc72f0b320c9ce5d80b7cc676aa02e6a4996abd752da573ecc339d2c80a2c8bfc28a9f4ceea51c2969adf20c8762b2ee0b1835bbd31bd90d5a638cfe523a596ea95feca64ae20010ad9957a724143e25a875f3cec3cedb4df1c16ac82b46b35db269da98270c813acd5e55a2c138306decdf96b1c1079d9cfd3704d519fbc5a4a547ba5286a7e80dc434f1bf34260433cbb79c4bcbb2a5bfc5a6c2430944ef2e34e7b9c76b21a97003c1fa85f6e9c4ed984108a7d301afe4a8f6625502a4bf17b24e009717c711571da2d6acd25868892bb9e29a77da8018222cd57c91d9aad96c954355e50a4760f08aa1f1b4257f7eb1a235c9234e8fc4ed97e8ad3e5d7d128807b726a4eb0038246d8580397c0ff5873d34b5a688a4a931be7c5737e5ada3e830b02d3efb075e338d71be55751a765a21d560933812856986a4d0d0a6d4954c50631fa3dff8565057149c4c4951858be4d5dca8e492093cfd88b56a19a161e7595e2e98764e91eb51c5289dc4efa65c7b207c517e269e3c699373fe1bf177c5d641cf2cfa4bd2afe8bff53a98b2d64bedc5a2e2f2973416c66791cf012696a0e95f7a4dadb86f925fc1943cb2b75fb3eda30f7779edff7cce95ae6f0f7b45ac207a4de4ec012a3654103136e11eb496276647d5e8f6e1659951fc7ef78d60e9430027e826f2aaab7c93ef58a5af47b92cec2f17903a26e2cc5d8d09b1db55e568bfb23a6b6b46125daf71a2f3a708676101d1b657cd38e81deb74d5d877b3321349cd667c29359b45b82218ad96f6c805ac3439fc63f0c91d66da36bae3f176c23b45b8ca1945fb4a4cea5c4a7b0f6ffd547614e7016f94d3e7889ccac868578ea779cd7e6b015aafd296dd5e2da2aa7e2f2af2ce6605f53613f069194dff35ffb9a2ebb30e011c26f669ededa2c91ffb06fedc44cf23f35d7d2716abcd50a8f561721d613d8f2c689ac245a5ac084fa86c72bbe80da7d508e63d891db528fa9e8f0d608034cd97dfde70f739857672e2d70070e850c3a6521067c1774244b86cca835ca8ff1748516e694ea2b5b42555f0df9cb9ec78825c351df51a76b6fe23b58ab3e87ba94ffbb98c9fa9d50c0c282ed0e506bcad24c02d8b625b4bdac822a9e5c911d095c5e4d3bf03448add978e0e7fab7f8a7008568f01a4f06f155223086bdcfe6879e76f199afb9caeadebaa9ec4ec8120f4ccfc4f5f7d7e3cc4dd0cba4d11546d8540030769c4b6d54abdd51fa1f30da642e5ff5c35d3e711c8931ff79e9f256ac6416e99943b0000bf32a5efdd5cf1cd668a62381febe959ca472be9c1a9bade59dbba07eb035ddb1e64ae2923bd276deed788db7600d776f49339215", "RickRoll"},
{"$odf$*0*0*1024*16*399a33262bbef99543bae29a6bb069c36e3a8f1b*8*6b721193b04fa933*16*99a6342ca7221c81890035dc5033c16f*0*ef8692296b67a8a77344e87b6193dc0a370b115d9e8c85e901c1a19d03ee2a34b7bf989bf9c2edab61022ea49f2a3ce5a6c807af374afd21b52ccbd0aa13784c73d2c8feda1fe0c8ebbb94e46e32904d95d1f135759e2733c2bd30b8cb0050c1cb8a2336c1151c498b9609547e96243aed9473e0901b55137ed78e2c6057e5826cfbfb94b0d77cb12b1fb6ac2752ea71c9c05cdb6a2f3d9611cb24f6e23065b408601518e3182ba1b8cef4cfcdf6ceecb2f33267cf733d3da715562e6977015b2b6423fb416781a1b6a67252eec46cda2741163f86273a68cd241a06263fdd8fc25f1c30fd4655724cc3e5c3d8f3e84abf446dd545155e440991c5fa613b7c18bd0dabd1ad45beb508cfb2b08d4337179cba63df5095b3d640eadbd72ca07f5c908241caf384ca268355c0d13471c241ea5569a5d04a9e3505883eb1c359099c1578e4bc33a73ba74ceb4a0520e0712e3c88582549a668a9c11b8680368cfbc3c5ec02663ddd97963d9dacefed89912ffa9cd945a8634a653296163bb873f3afd1d02449494fab168e7f652230c16d35853df1164219c04c4bd17954b85eb1939d87412eeeb2a039a8bb087178c03a9a40165a28a985e8bc443071b3764d846d342ca2073223f9809fe2ee3a1dfa65b9d897877ebb33a48a760c8fb32062b51a96421256a94896e93b41f559fdec7743680a8deacff9132d6129574d1a62be94308b195d06a275947a1455600030468dde53639fd239a8ab074ec1c7f661f2c9e8d60d6e0e743d351017d5c3d3be21b67d05310d0c5f3fd670acd95ca24f91b0d84d761d15259848f736ff08610e300c31b242f6d24ac2418cdd1fe0248f8a2a2f5775c08e5571c8d25d65ff573cc403ea9cad3bafd56c166fbcec9e64909df3c6ec8095088a8992493b7180c4dbb4053dcb55d9c5f46d728a97ae4ec7ac4b5941bcc3b64a4af31f7dc673e6715a52c9cdbe23dc21e51784f8314c019fc90e8612fcffe01d026fd9e15d1474e73dedf1d3830da81320097be6953173e4293372b5e5a8ecc49ac8b1a658cff16ffa04a8c1728d02ab67694170f10bc9030939ff6df3f901faa019d9b9fd2ba23e89eb0bbaf7a69a2272ee1df0403e6435aee147da217e8bf4c1ee5c53eb83aac1b3f8772d5cd2a2686f312ac4f4f2b0733593e28305a550dbbd18d3405a464ff20e0d9364cfe49b82a97ef7303aec92004a3476cf9ad012eaaf10fd07d3823e1b6871e82113ecfe4392854de9ab21ab1e33ce93d1abb07018007f50d641c8eb85b28fd335fd2281745772c98f8f0bba3f4d40ba602545ef8a0db3062f02d7ee5f49b42cbe19c0c2124952f98c49aff6927110314e54fe8d47a10f13d2d4055c1f3f2d679d4043c9b2f68b2220b6c6c738f6402c01d000c9394c8ed27e70c7ee6108d3e7e809777bab9be30b33a3fb83271cbf3b", "WhoCanItBeNow"},
/* CMIYC 2013 "pro" hard hash */
{"$odf$*1*1*1024*32*7db40092b3857fa319bc0d717b60cefc40b1d51ef92ebc893c518ffebffdf200*16*5f7c8ab6e5d1c41dbd23c384fee957ed*16*9ff092f2dd29dab6ce5fb43ad7bbdd5a*0*bac8343436715b40aaf4690a7dc57b0f82b8f25f8ad0f9833e32468410d4dd02e387a067872b5847adc9a276c86a03113e11b903854202eec361c5b7ba74bcb254a4f76d97ca45dbe30fe49f78ce9cf7df0246ae4524b8f13ad28357838559c116d9ed59267f4df91da3ea9758c132e2ebc40fd4ee8e9978921a0847d7ca5c30ef911e0b88f9fc84039633eacf5e023c82dd1a573abd7663b8f36a039d42ed91b4a0665902f174be8cefefd367ba9b5da95768550e567242f1b2e2c3866eb8aa3c12d0b34277929616319ea29dd9a3b9addb963d45c7d4c2b54a99b0c1cf24cac3e981ed4e178e621938b83be30f54d37d6425a0b7ac9dff5504830fe1d1f136913c32d8f732eb55e6179ad2699fd851af3a44f8ca914117344e6fadf501bf6f6e0ae7970a2b58eb3af0d89c78411c6adde8aa1f0e8b69c261fd04835cdc3ddf0a6d67ddff33995b5cc7439db83f90c8a2e07e2513771fffcf8b55ce1a382b14ffbf22be9bdd6f83a9b7602995c9793dfffb32c9eb16930c0bb55e5a8364fa06a59fca5af27df4a02565db2b4718ed44405f67a052738692c189039a7fd63713207616eeeebace3c0a3963dd882c485523f49fa0bc2663fc6ef090a220dd5c6554bc0702da8c3122383ea8a009837d549d58ad688c9cc4b8461fe70f4600539cd1d82edd4e110b1c1472dae40adc3126e2a09dd2753dcd83799841745160e235652f601d1257268321f22d19bd9dc811afaf143765c7cb53717ea329e9e4064a3cf54b33d006e93b83102e2ad3327f6d995cb598bd96466b1287e6da9967f4f034c63fd06c6e5c7ec25008c122385f271d18918cff3823f9fbdb37791e7371ce1d6a4ab08c12eca5fceb7c9aa7ce25a8bd640a68c622ddd858973426cb28e65c4c3421b98ebf4916b8c2bfe71b2afec4ab2f99291a4c4d3312521850d46436aecd9e2e93a8619dbc3c1caf4507bb488ce921cd8d13a1640e6c49403e0416924b3b1a01c9939c7bcdec50f057d6f4dccf0afc8c2ad37c4f8429c77cf19ad49db5e5219e965a3ed5d56d799689bd93642602d7959df0493ea62cccff83e66d85bf45d6b5b03e8cfca84daf37ecfccb60f85f3c5102900a02a5df015b1bf1ef55dfb2ab20321bcf3325d1adce22d4456837dcc589ef36d4f06ccdcc96ef10ff806d76f0044e92e192b946ae0f09860a38c2a6052fe84c3e9bb9380e2b344812376c6bbd5c9858745dbd072798a3d7eff31ae5d509c11b5269ec6f2108cb6e72a5ab495ea7aed5bf3dabedbb517dc4ceff818a8e890a6ea9a91bab37e8a463a9d04993c5ba7e40e743e033842540806d4a65258d0f4d5988e1e0011f0e85fcae3b2819c1f17f5c7980ecd87aee425cdab4f34bfb7a31ee7936c60f2f4f52aea67aef4736a419dc9c559279b569f61995eb2d6b7c204c3e9f56ca5c8a889812a30c33", "juNK^r00M!"},
{NULL}
};
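/*
 * Field layout of a hash line, as parsed by valid() and get_salt() below
 * (annotation added for readability):
 *
 *   $odf$ * cipher_type    0 = Blowfish CFB64, 1 = AES-256 CBC
 *         * checksum_type  0 = SHA-1, 1 = SHA-256
 *         * iterations     PBKDF2-HMAC-SHA1 iteration count
 *         * key_size       16 or 32 bytes
 *         * checksum       hex; the reference value extracted by get_binary()
 *         * iv_length * iv (hex)
 *         * salt_length * salt (hex)
 *         * unused field   "0" in all known hashes
 *         * content        hex, up to 1024 bytes of encrypted data
 */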
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[32 / sizeof(uint32_t)];
static struct custom_salt {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int content_length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} *cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
int res, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */
goto err;
if (strlen(p) != 1)
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */
goto err;
if (strlen(p) != 1)
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* key size */
goto err;
res = atoi(p);
if (res != 16 && res != 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */
goto err;
res = hexlenl(p, &extra);
if (extra)
goto err;
if (res != BINARY_SIZE * 2 && res != 64) // 2 hash types.
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv length */
goto err;
res = atoi(p);
if (res > 16 || res < 0)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
goto err;
if (strlen(p) >= 10)
goto err;
res = atoi(p);
if (res > 32 || res < 0)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* unused field (always "0" in known hashes) */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* content */
goto err;
res = strlen(p);
if (res > 2048 || res & 1)
goto err;
if (!ishexlc(p))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN; /* skip over "$odf$*" */
p = strtokm(ctcopy, "*");
cs.cipher_type = atoi(p);
p = strtokm(NULL, "*");
cs.checksum_type = atoi(p);
p = strtokm(NULL, "*");
cs.iterations = atoi(p);
p = strtokm(NULL, "*");
cs.key_size = atoi(p);
strtokm(NULL, "*");
/* skip checksum field */
p = strtokm(NULL, "*");
cs.iv_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.iv_length; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.salt_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
strtokm(NULL, "*");
p = strtokm(NULL, "*");
memset(cs.content, 0, sizeof(cs.content));
for (i = 0; p[i * 2] && i < 1024; i++)
cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
cs.content_length = i;
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN; /* skip over "$odf$*" */
strtokm(ctcopy, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
p = strtokm(NULL, "*");
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return out;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
unsigned char key[MAX_KEYS_PER_CRYPT][32];
unsigned char hash[MAX_KEYS_PER_CRYPT][32];
BF_KEY bf_key;
int bf_ivec_pos, i;
unsigned char ivec[8];
unsigned char output[1024];
SHA_CTX ctx;
#ifdef SIMD_COEF_32
int lens[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
#endif
if (cur_salt->checksum_type == 0 && cur_salt->cipher_type == 0) {
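		/* checksum_type 0, cipher_type 0: SHA-1 password hash, PBKDF2-SHA1
		   key derivation, Blowfish CFB64 decryption, SHA-1 content checksum. */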
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
SHA1_Init(&ctx);
SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
SHA1_Final((unsigned char *)(hash[i]), &ctx);
}
#ifdef SIMD_COEF_32
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = 20;
pin[i] = hash[i];
pout[i] = key[i];
}
pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
cur_salt->salt_length,
cur_salt->iterations, pout,
cur_salt->key_size, 0);
#else
pbkdf2_sha1(hash[0], 20, cur_salt->salt,
cur_salt->salt_length,
cur_salt->iterations, key[0],
cur_salt->key_size, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
bf_ivec_pos = 0;
memcpy(ivec, cur_salt->iv, 8);
BF_set_key(&bf_key, cur_salt->key_size, key[i]);
BF_cfb64_encrypt(cur_salt->content, output, cur_salt->content_length, &bf_key, ivec, &bf_ivec_pos, 0);
SHA1_Init(&ctx);
SHA1_Update(&ctx, output, cur_salt->content_length);
SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
}
}
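		/* any other type combination: SHA-256 password hash, PBKDF2-SHA1
		   key derivation, AES-256-CBC decryption, SHA-256 content checksum. */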
else {
SHA256_CTX ctx;
AES_KEY akey;
unsigned char iv[16];
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
SHA256_Final((unsigned char *)hash[i], &ctx);
}
#ifdef SIMD_COEF_32
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = 32;
pin[i] = hash[i];
pout[i] = key[i];
}
pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
cur_salt->salt_length,
cur_salt->iterations, pout,
cur_salt->key_size, 0);
#else
pbkdf2_sha1(hash[0], 32, cur_salt->salt,
cur_salt->salt_length,
cur_salt->iterations, key[0],
cur_salt->key_size, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
memcpy(iv, cur_salt->iv, 16);
memset(&akey, 0, sizeof(AES_KEY));
if (AES_set_decrypt_key(key[i], 256, &akey) < 0) {
fprintf(stderr, "AES_set_decrypt_key failed!\n");
}
AES_cbc_encrypt(cur_salt->content, output, cur_salt->content_length, &akey, iv, AES_DECRYPT);
SHA256_Init(&ctx);
SHA256_Update(&ctx, output, cur_salt->content_length);
SHA256_Final((unsigned char*)crypt_out[index+i], &ctx);
}
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void odf_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
/*
* The format tests all have iteration count 1024.
* Just in case the iteration count is tunable, let's report it.
*/
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
struct fmt_main fmt_odf = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{
"iteration count",
},
{ FORMAT_TAG },
odf_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
odf_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
nodal_two_step_v_p_strategy_for_FSI.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: June 2018 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H
#define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_for_FSI.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include "nodal_two_step_v_p_strategy.h"
#include "nodal_two_step_v_p_strategy_for_FSI.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <fstream>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class NodalTwoStepVPStrategyForFSI : public NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategyForFSI);
/// Counted pointer of NodalTwoStepVPStrategy
//typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
/// Node type (default is: Node<3>)
typedef Node<3> NodeType;
/// Geometry type (using with given NodeType)
typedef Geometry<NodeType> GeometryType;
typedef std::size_t SizeType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mVelocityTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mPressureTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mMaxPressureIter;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mDomainSize;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mTimeOrder;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mReformDofSet;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpMomentumStrategy;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpPressureStrategy;
typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType;
typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::InitializeStrategy(rSolverConfig);
}
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart,
pVelocityLinearSolver,
pPressureLinearSolver,
ReformDofSet,
VelTol,
PresTol,
MaxPressureIterations,
TimeOrder,
DomainSize)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional-step strategy; auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor.
virtual ~NodalTwoStepVPStrategyForFSI() {}
bool SolveSolutionStep() override
{
// Initialize BDF2 coefficients
ModelPart &rModelPart = BaseType::GetModelPart();
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
bool converged = false;
unsigned int maxNonLinearIterations = mMaxPressureIter;
std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl;
if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
{
maxNonLinearIterations *= 2;
}
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
double pressureNorm = 0;
double velocityNorm = 0;
// bool momentumAlreadyConverged=false;
// bool continuityAlreadyConverged=false;
/* boost::timer solve_step_time; */
// std::cout<<" InitializeSolutionStep().... "<<std::endl;
// this->UnactiveSliverElements(); //this is done in set_active_flag_mesher_process which is activated from fluid_pre_refining_mesher.py
InitializeSolutionStep(); // it fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids and inner solids
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
if (it == 0)
{
ComputeNodalVolumeAndAssignFlagToElementType(); // it assigns NODAL_VOLUME to fluid and SOLID_NODAL_VOLUME to solid. Interface nodes have both
this->InitializeNonLinearIterations(); // it fills SOLID_NODAL_SFD_NEIGHBOURS for solids and NODAL_SFD_NEIGHBOURS for fluids
}
// std::cout<<" CalcNodalStrainsAndStresses .... "<<std::endl;
CalcNodalStrainsAndStresses(); // it computes stresses and strains for fluid and solid nodes
// std::cout<<" CalcNodalStrainsAndStresses DONE "<<std::endl;
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);
UpdateTopology(rModelPart, BaseType::GetEchoLevel());
// std::cout<<" ComputeNodalVolume .... "<<std::endl;
ComputeNodalVolume();
// std::cout<<" ComputeNodalVolume DONE "<<std::endl;
this->InitializeNonLinearIterations();
// std::cout<<" InitializeNonLinearIterations DONE "<<std::endl;
CalcNodalStrains();
// std::cout<<" CalcNodalStrains DONE "<<std::endl;
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
}
// if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("momentumConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// momentumAlreadyConverged=true;
// }
// if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("continuityConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// continuityAlreadyConverged=true;
// }
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1))
{
//this->ComputeErrorL2NormCaseImposedG();
//this->ComputeErrorL2NormCasePoiseuille();
this->CalculateAccelerations();
// std::ofstream myfile;
// myfile.open ("maxConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
}
bool hybridMethod = false;
if (hybridMethod == true)
{
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 0))
{
this->UpdateElementalStressStrain();
}
}
if ((continuityConverged && momentumConverged) && it > 1)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
converged = true;
std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
if (mReformDofSet)
this->Clear();
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
return converged;
}
void UpdateElementalStressStrain()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
/* itElem-> InitializeElementStrainStressState(); */
itElem->InitializeSolutionStep(rCurrentProcessInfo);
}
}
}
void Initialize() override
{
std::cout << " \n Initialize in nodal_two_step_v_p_strategy_FSI" << std::endl;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size();
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
{
Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
if (rNodalSFDneighbours.size() != sizeSDFNeigh)
{
rNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
{
Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
if (rSpatialDefRate.size() != sizeStrains)
{
rSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
{
Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
if (rFgrad.size1() != dimension)
{
rFgrad.resize(dimension, dimension, false);
}
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (rFgradVel.size1() != dimension)
{
rFgradVel.resize(dimension, dimension, false);
}
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
{
rSolidNodalStress.resize(sizeStrains, false);
}
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
{
rSolidNodalStress.resize(sizeStrains, false);
}
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
{
Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
{
rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
{
Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
if (rSolidSpatialDefRate.size() != sizeStrains)
{
rSolidSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
{
Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
if (rSolidFgrad.size1() != dimension)
{
rSolidFgrad.resize(dimension, dimension, false);
}
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (rSolidFgradVel.size1() != dimension)
{
rSolidFgradVel.resize(dimension, dimension, false);
}
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
AssignMaterialToEachNode(itNode);
}
// }
}
void AssignMaterialToEachNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double deviatoricCoeff = 0;
double volumetricCoeff = 0;
if (itNode->Is(SOLID))
{
const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
//deviatoricCoeff=deltaT*secondLame
deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
//volumetricCoeff=bulk*deltaT=deltaT*(firstLame+2*secondLame/3)
volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0;
}
else if (itNode->Is(FLUID) || itNode->Is(RIGID))
{
deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
}
if ((itNode->Is(SOLID) && itNode->Is(RIGID)))
{
itNode->FastGetSolutionStepValue(INTERFACE_NODE) = true;
}
else
{
itNode->FastGetSolutionStepValue(INTERFACE_NODE) = false;
}
const double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0;
//currFirstLame=deltaT*firstLame
itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame;
itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff;
}
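// Recap of the relations used above (sketch; dt = DELTA_TIME, E = Young
// modulus, nu = Poisson ratio):
//   shear (second Lame) modulus  mu     = E / (2 (1 + nu))    -> deviatoricCoeff = dt * mu
//   first Lame parameter         lambda = E nu / ((1 + nu) (1 - 2 nu))
//   bulk modulus                 K      = lambda + 2 mu / 3   -> volumetricCoeff = dt * K
// hence VOLUMETRIC_COEFFICIENT stores dt * lambda = volumetricCoeff - (2/3) * deviatoricCoeff,
// while for fluid/rigid nodes DYNAMIC_VISCOSITY and dt * BULK_MODULUS play the same roles.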
void UnactiveSliverElements()
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
MesherUtilities MesherUtils;
const double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart);
const double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size());
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
unsigned int numNodes = itElem->GetGeometry().size();
double ElementalVolume = 0; // per-iteration local: a single copy shared across OpenMP threads would be a data race
if (numNodes == (dimension + 1))
{
if (dimension == 2)
{
ElementalVolume = (itElem)->GetGeometry().Area();
}
else if (dimension == 3)
{
ElementalVolume = (itElem)->GetGeometry().Volume();
}
if (ElementalVolume < CriticalVolume)
{
// std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl;
(itElem)->Set(ACTIVE, false);
}
else
{
(itElem)->Set(ACTIVE, true);
}
}
}
}
KRATOS_CATCH("");
}
void ComputeNodalVolume()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ElementsArrayType &pElements = rModelPart.Elements();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);
// #pragma omp parallel
// {
// The parallel region above is commented out, so ThisThread() is always 0 and
// iterating a single partition would skip elements whenever
// omp_get_max_threads() > 1; traverse the full element range serially instead.
typename ElementsArrayType::iterator ElemBegin = pElements.begin();
typename ElementsArrayType::iterator ElemEnd = pElements.end();
for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
{
Element::GeometryType &geometry = itElem->GetGeometry();
double elementalVolume = 0;
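// Lumped split of the element measure: each node of a triangle receives
// area / 3, each node of a tetrahedron receives volume / 4.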
if (dimension == 2)
{
elementalVolume = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
elementalVolume = geometry.Volume() * 0.25;
}
// index = 0;
unsigned int numNodes = geometry.size();
for (unsigned int i = 0; i < numNodes; i++)
{
double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
nodalVolume += elementalVolume;
if (itElem->Is(SOLID))
{
double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
solidVolume += elementalVolume;
nodalVolume += -elementalVolume;
// if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// //I have to subtract the solid volume from the nodal volume of the interface fluid nodes because I added it before
// nodalVolume += -elementalVolume;
// }
}
}
}
// }
}
void ComputeNodalVolumeAndAssignFlagToElementType()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ElementsArrayType &pElements = rModelPart.Elements();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);
// #pragma omp parallel
// {
// As in ComputeNodalVolume(): the parallel region is commented out, so
// traverse the full element range serially rather than one thread partition.
typename ElementsArrayType::iterator ElemBegin = pElements.begin();
typename ElementsArrayType::iterator ElemEnd = pElements.end();
for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
{
Element::GeometryType &geometry = itElem->GetGeometry();
double elementalVolume = 0;
if (dimension == 2)
{
elementalVolume = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
elementalVolume = geometry.Volume() * 0.25;
}
// index = 0;
unsigned int numNodes = geometry.size();
unsigned int fluidNodes = 0;
unsigned int solidNodes = 0;
unsigned int interfaceNodes = 0;
for (unsigned int i = 0; i < numNodes; i++)
{
if ((geometry(i)->Is(FLUID) && geometry(i)->IsNot(SOLID)) || (geometry(i)->Is(FLUID) && geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true))
{
fluidNodes += 1;
}
if (geometry(i)->Is(SOLID))
{
solidNodes += 1;
}
if (geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
interfaceNodes += 1;
}
}
if (solidNodes == numNodes)
{
itElem->Set(SOLID);
// std::cout<<"THIS SOLID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (interfaceNodes == numNodes)
{
itElem->Set(SOLID);
// std::cout<<"THIS INTERFACE ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (fluidNodes == numNodes)
{
itElem->Set(FLUID);
// std::cout<<"THIS FLUID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
if (solidNodes == numNodes && fluidNodes == numNodes)
{
itElem->Reset(FLUID);
// std::cout<<"THIS ELEMENT WAS BOTH FLUID AND SOLID "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
}
for (unsigned int i = 0; i < numNodes; i++)
{
double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
nodalVolume += elementalVolume;
if (itElem->Is(SOLID))
{
double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
solidVolume += elementalVolume;
nodalVolume += -elementalVolume;
// if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// // I have to subtract the solid volume from the nodal volume of the interface fluid nodes because I added it before
// nodalVolume += -elementalVolume;
// }
// if(interfaceNodes==numNodes && solidDensity==0){
// std::cout<<"This interface element has not a correct density....I am assigning it the fluid density----- TODO: IMPROVE IT, TAKE FROM NEIGHBOURS"<<std::endl;
// double density=geometry(i)->FastGetSolutionStepValue(DENSITY);
// geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY)=density;
// }
}
}
}
// }
}
void InitializeSolutionStep() override
{
FillNodalSFDVector();
}
void FillNodalSFDVector()
{
// std::cout << "FillNodalSFDVector(); ... " << std::endl;
ModelPart &rModelPart = BaseType::GetModelPart();
// #pragma omp parallel
// {
// ModelPart::NodeIterator NodesBegin;
// ModelPart::NodeIterator NodesEnd;
// OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
// for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
// {
for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
{
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == false)
{
this->SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER
if (itNode->Is(SOLID))
{
SetNeighboursOrderToSolidNode(itNode); // it assigns neighbours to solid inner nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER
}
}
else
{
SetNeighboursOrderToInterfaceNode(itNode); // it assigns neighbours to interface nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids
}
}
// }
// std::cout << "FillNodalSFDVector(); DONE " << std::endl;
}
void SetNeighboursOrderToSolidNode(ModelPart::NodeIterator itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 because the node itself must also be counted as a neighbour node
Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rNodeOrderedNeighbours.size() != neighbourNodes)
rNodeOrderedNeighbours.resize(neighbourNodes, false);
noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes);
rNodeOrderedNeighbours[0] = itNode->Id();
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id();
}
}
}
void SetNeighboursOrderToInterfaceNode(ModelPart::NodeIterator itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1;
unsigned int fluidCounter = 1;
unsigned int solidCounter = 1;
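// Two passes over the neighbours: first count how many are fluid-side and how many solid-side
// (the node itself is counted in both), then resize the ordered-Id vectors and fill them.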
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
fluidCounter += 1;
}
if (neighb_nodes[k].Is(SOLID))
{
solidCounter += 1;
}
}
}
Vector &rFluidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector &rSolidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rFluidNodeOrderedNeighbours.size() != fluidCounter)
rFluidNodeOrderedNeighbours.resize(fluidCounter, false);
if (rSolidNodeOrderedNeighbours.size() != solidCounter)
rSolidNodeOrderedNeighbours.resize(solidCounter, false);
noalias(rFluidNodeOrderedNeighbours) = ZeroVector(fluidCounter);
noalias(rSolidNodeOrderedNeighbours) = ZeroVector(solidCounter);
rFluidNodeOrderedNeighbours[0] = itNode->Id();
rSolidNodeOrderedNeighbours[0] = itNode->Id();
fluidCounter = 0;
solidCounter = 0;
if (neighbourNodes > 1)
{
for (unsigned int k = 0; k < neighbourNodes - 1; k++)
{
if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
fluidCounter += 1;
rFluidNodeOrderedNeighbours[fluidCounter] = neighb_nodes[k].Id();
}
if (neighb_nodes[k].Is(SOLID))
{
solidCounter += 1;
rSolidNodeOrderedNeighbours[solidCounter] = neighb_nodes[k].Id();
}
}
}
// fluidCounter+=1;
// solidCounter+=1;
// ModelPart& rModelPart = BaseType::GetModelPart();
// const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// const unsigned int sizeFluidSDFNeigh=fluidCounter*dimension;
// const unsigned int sizeSolidSDFNeigh=solidCounter*dimension;
// Vector& rFluidNodalSFDneighbours=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
// Vector& rSolidNodalSFDneighbours=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
// if(rFluidNodalSFDneighbours.size() != sizeFluidSDFNeigh)
// rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,false);
// if(rSolidNodalSFDneighbours.size() != sizeSolidSDFNeigh)
// rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,false);
// noalias(rFluidNodalSFDneighbours)=ZeroVector(sizeFluidSDFNeigh);
// noalias(rSolidNodalSFDneighbours)=ZeroVector(sizeSolidSDFNeigh);
// rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,true);
// rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,true);
// std::cout<<"rFluidNodeOrderedNeighbours "<<rFluidNodeOrderedNeighbours<<std::endl;
// std::cout<<"rSolidNodeOrderedNeighbours "<<rSolidNodeOrderedNeighbours<<std::endl;
// std::cout<<"rFluidNodalSFDneighbours "<<rFluidNodalSFDneighbours<<std::endl;
// std::cout<<"rSolidNodalSFDneighbours "<<rSolidNodalSFDneighbours<<std::endl;
}
void InitializeNodalVariablesForSolidRemeshedDomain(ModelPart::NodeIterator itNode)
{
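// Resets all SOLID_ nodal containers for a freshly remeshed domain: vectors and matrices are
// resized to the current dimension/neighbour count and zeroed, scalar quantities are set to zero.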
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1;
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
rSolidNodalStress.resize(sizeStrains, false);
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rSolidNodalDevStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rSolidNodalDevStress.size() != sizeStrains)
rSolidNodalDevStress.resize(sizeStrains, false);
noalias(rSolidNodalDevStress) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
{
Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS_ORDER))
{
Vector &rSolidNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
if (rSolidNodalSFDneighboursOrder.size() != neighbourNodes)
rSolidNodalSFDneighboursOrder.resize(neighbourNodes, false);
noalias(rSolidNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
{
Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
if (rSolidSpatialDefRate.size() != sizeStrains)
rSolidSpatialDefRate.resize(sizeStrains, false);
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
{
Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
if (rSolidFgrad.size1() != dimension)
rSolidFgrad.resize(dimension, dimension, false);
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (rSolidFgradVel.size1() != dimension)
rSolidFgradVel.resize(dimension, dimension, false);
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUMETRIC_DEF_RATE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_EQUIVALENT_STRAIN_RATE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
}
}
void CalcNodalStrainsAndStresses()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
const double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
double theta = 0.5;
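// theta is the time-integration parameter passed to the nodal deformation-gradient computation:
// 0.5 for the fluid contribution, 1.0 for the solid one (set below).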
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
if (nodalVolume > 0)
{
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (interfaceFgrad.size1() != dimension)
interfaceFgrad.resize(dimension, dimension, false);
if (interfaceFgradVel.size1() != dimension)
interfaceFgradVel.resize(dimension, dimension, false);
noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
// The stresses and strains are computed twice: once for the solid part and once for the fluid part.
// Matrix interfaceFgrad=ZeroMatrix(dimension,dimension);
// Matrix interfaceFgradVel=ZeroMatrix(dimension,dimension);
// The following function is more expensive than the general one because it performs one extra loop over the neighbour nodes. This is why it is called here also for fluid interface nodes.
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
CalcNodalStrainsAndStressesForInterfaceFluidNode(itNode);
}
if (solidNodalVolume > 0)
{
Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (solidInterfaceFgrad.size1() != dimension)
solidInterfaceFgrad.resize(dimension, dimension, false);
if (solidInterfaceFgradVel.size1() != dimension)
solidInterfaceFgradVel.resize(dimension, dimension, false);
noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
theta = 1.0;
// Matrix solidInterfaceFgrad=ZeroMatrix(dimension,dimension);
// Matrix solidInterfaceFgradVel=ZeroMatrix(dimension,dimension);
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
CalcNodalStrainsAndStressesForInterfaceSolidNode(itNode);
}
}
else
{
if (itNode->Is(SOLID) && solidNodalVolume > 0)
{
theta = 1.0;
ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
CalcNodalStrainsAndStressesForSolidNode(itNode);
}
else if (nodalVolume > 0)
{
theta = 0.5;
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsAndStressesForNode(itNode);
}
}
if (nodalVolume == 0 && solidNodalVolume == 0)
{ // both nodal volumes are zero: reinitialize the remeshed-domain variables
theta = 0.5;
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
}
// }
// if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
// CopyValuesToSolidNonInterfaceNodes(itNode);
// }
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
void CopyValuesToSolidNonInterfaceNodes(ModelPart::NodeIterator itNode)
{
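// Copies the fluid nodal quantities (SFD neighbour ordering, deformation gradients, strain rates
// and stresses) into their SOLID_ counterparts for solid nodes that are not on the interface.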
Vector &solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector &solidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
Vector &solidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
double &volumetricDefRate = itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE);
Vector &solidCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
Vector &solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
unsigned int sizeNodalSFDneighboursId = nodalSFDneighboursId.size();
solidNodalSFDneighboursId.resize(sizeNodalSFDneighboursId, false);
Vector nodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
unsigned int sizeNodalSFDneigh = nodalSFDneigh.size();
solidNodalSFDneigh.resize(sizeNodalSFDneigh, false);
solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
solidNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
solidInterfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
solidSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE);
solidCauchyStress = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS);
solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
}
void CalcNodalStrainsAndStressesForInterfaceFluidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
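// Papanastasiou-regularized viscosity: mu_eff = mu + (tau_y / gamma_dot) * (1 - exp(-m * gamma_dot)),
// with m the ADAPTIVE_EXPONENT; for very small gamma_dot the limit value m * tau_y is used instead.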
if (yieldShear > 0)
{
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double currFirstLame = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
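// Equivalent strain rate gamma_dot = sqrt(2 * D:D) (shear Voigt components weighted by 4).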
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
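// Nodal Cauchy stress: sigma = currFirstLame * tr(D) * I + 2 * mu_eff * D;
// the deviatoric part removes tr(D)/3 from the normal components.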
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
// if(itNode->Is(SOLID))
// {
// nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0];
// nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1];
// nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2];
// nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0];
// nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1];
// nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2];
// }
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
// if(itNode->Is(SOLID))
// {
// nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0];
// nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1];
// nodalSigmaTot_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2];
// nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[3];
// nodalSigmaTot_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[4];
// nodalSigmaTot_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[5];
// nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0];
// nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1];
// nodalSigmaDev_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2];
// nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[3];
// nodalSigmaDev_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[4];
// nodalSigmaDev_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[5];
// }
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
}
}
void CalcNodalStrainsAndStressesForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
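// Time-step-scaled hypoelastic coefficients: currFirstLame = dt * lambda with
// lambda = nu * E / ((1 + nu) * (1 - 2 * nu)), deviatoricCoeff = dt * mu with mu = E / (2 * (1 + nu)).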
const double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
auto &r_strain_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
r_strain_tensor2D[0] = SpatialVelocityGrad(0, 0);
r_strain_tensor2D[1] = SpatialVelocityGrad(1, 1);
r_strain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
}
auto &r_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
r_stress_tensor2D[0] = nodalSigmaTot_xx;
r_stress_tensor2D[1] = nodalSigmaTot_yy;
r_stress_tensor2D[2] = nodalSigmaTot_xy;
auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
auto &r_strain_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
r_strain_tensor3D[0] = SpatialVelocityGrad(0, 0);
r_strain_tensor3D[1] = SpatialVelocityGrad(1, 1);
r_strain_tensor3D[2] = SpatialVelocityGrad(2, 2);
r_strain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
r_strain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
r_strain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
}
auto &r_stress_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
r_stress_tensor3D[0] = nodalSigmaTot_xx;
r_stress_tensor3D[1] = nodalSigmaTot_yy;
r_stress_tensor3D[2] = nodalSigmaTot_zz;
r_stress_tensor3D[3] = nodalSigmaTot_xy;
r_stress_tensor3D[4] = nodalSigmaTot_xz;
r_stress_tensor3D[5] = nodalSigmaTot_yz;
auto &r_dev_stress_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_stress_tensor3D[0] = nodalSigmaDev_xx;
r_dev_stress_tensor3D[1] = nodalSigmaDev_yy;
r_dev_stress_tensor3D[2] = nodalSigmaDev_zz;
r_dev_stress_tensor3D[3] = nodalSigmaDev_xy;
r_dev_stress_tensor3D[4] = nodalSigmaDev_xz;
r_dev_stress_tensor3D[5] = nodalSigmaDev_yz;
}
}
void CalcNodalStrainsAndStressesForSolidNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
const double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
// if(itNode->Is(SOLID)){
// std::cout<<"solid node"<<std::endl;
// }
// if(itNode->Is(FLUID)){
// std::cout<<"FLUID node"<<std::endl;
// }
// if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// std::cout<<"currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl;
// }else{
// std::cout<<"NOT INTERFACE currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl;
// }
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
auto &r_strain_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
r_strain_tensor2D[0] = SpatialVelocityGrad(0, 0);
r_strain_tensor2D[1] = SpatialVelocityGrad(1, 1);
r_strain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
}
auto &r_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
r_stress_tensor2D[0] = nodalSigmaTot_xx;
r_stress_tensor2D[1] = nodalSigmaTot_yy;
r_stress_tensor2D[2] = nodalSigmaTot_xy;
auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
auto &r_strain_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
r_strain_tensor3D[0] = SpatialVelocityGrad(0, 0);
r_strain_tensor3D[1] = SpatialVelocityGrad(1, 1);
r_strain_tensor3D[2] = SpatialVelocityGrad(2, 2);
r_strain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
r_strain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
r_strain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
const double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
if (itNode->Is(SOLID))
{
nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
}
auto &r_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
r_tensor3D[0] = nodalSigmaTot_xx;
r_tensor3D[1] = nodalSigmaTot_yy;
r_tensor3D[2] = nodalSigmaTot_zz;
r_tensor3D[3] = nodalSigmaTot_xy;
r_tensor3D[4] = nodalSigmaTot_xz;
r_tensor3D[5] = nodalSigmaTot_yz;
auto &r_dev_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_tensor3D[0] = nodalSigmaDev_xx;
r_dev_tensor3D[1] = nodalSigmaDev_yy;
r_dev_tensor3D[2] = nodalSigmaDev_zz;
r_dev_tensor3D[3] = nodalSigmaDev_xy;
r_dev_tensor3D[4] = nodalSigmaDev_xz;
r_dev_tensor3D[5] = nodalSigmaDev_yz;
}
}
void CalcNodalStrainsForSolidNode(ModelPart::NodeIterator itNode)
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
// Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
// double detFgrad=1.0;
// Matrix InvFgrad=ZeroMatrix(dimension,dimension);
// Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension);
double detFgrad = 1.0;
Matrix nodalFgrad = ZeroMatrix(dimension, dimension);
Matrix FgradVel = ZeroMatrix(dimension, dimension);
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
nodalFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
//Inverse
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
}
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
const double DefVol = DefX + DefY;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
const double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
const double DefVol = DefX + DefY + DefZ;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
}
void CalcNodalStrainsForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
//Inverse
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
const double DefVol = DefX + DefY;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
const double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
const double DefVol = DefX + DefY + DefZ;
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
}
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
void CalcNodalStrains()
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
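// Same node loop as CalcNodalStrainsAndStresses, but only the strain measures (spatial deformation
// rate and volumetric deformation rate) are updated; theta = 1.0 is used for both contributions.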
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
const double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
double theta = 1.0;
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
if (nodalVolume > 0)
{
// The strains are computed twice: once for the solid part and once for the fluid part.
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (interfaceFgrad.size1() != dimension)
interfaceFgrad.resize(dimension, dimension, false);
if (interfaceFgradVel.size1() != dimension)
interfaceFgradVel.resize(dimension, dimension, false);
noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
// Matrix interfaceFgrad = ZeroMatrix(dimension,dimension);
// Matrix interfaceFgradVel = ZeroMatrix(dimension,dimension);
// the following function is more expensive than the general one because it has one extra loop over the neighbour nodes; this is why it is used here also for fluid interface nodes
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
this->CalcNodalStrainsForNode(itNode);
}
if (solidNodalVolume > 0)
{
Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (solidInterfaceFgrad.size1() != dimension)
solidInterfaceFgrad.resize(dimension, dimension, false);
if (solidInterfaceFgradVel.size1() != dimension)
solidInterfaceFgradVel.resize(dimension, dimension, false);
noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
// Matrix solidInterfaceFgrad = ZeroMatrix(dimension,dimension);
// Matrix solidInterfaceFgradVel = ZeroMatrix(dimension,dimension);
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
CalcNodalStrainsForInterfaceSolidNode(itNode);
}
}
else
{
if (itNode->Is(SOLID) && solidNodalVolume > 0)
{
ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
CalcNodalStrainsForSolidNode(itNode);
}
else if (nodalVolume > 0)
{
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsForNode(itNode);
}
}
if (nodalVolume == 0 && solidNodalVolume == 0)
{ // if nodalVolume==0
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
}
// if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
// CopyValuesToSolidNonInterfaceNodes(itNode);
// }
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
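// The nodal deformation gradient is assembled from the stored nodal
// shape-function derivatives (SFD): Fgrad(a,b) += dN_i/dX_b * x_i(a) and
// FgradVel(a,b) += dN_i/dX_b * v_i(a), where v_i is the theta-blend of the
// current and previous velocity, and the sum runs over the node itself (first
// SFD entries) followed by its neighbour nodes.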
void ComputeAndStoreNodalDeformationGradientForSolidNode(ModelPart::NodeIterator itNode, double theta)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
/* unsigned int idThisNode=nodalSFDneighboursId[0]; */
const unsigned int neighSize = nodalSFDneighboursId.size();
Matrix Fgrad = ZeroMatrix(dimension, dimension);
Matrix FgradVel = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
if (dimension == 2)
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) // neighb_nodes has one entry less than nodalSFDneighboursId because the latter also stores the ID of the considered node at the beginning
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
unsigned int neigh_nodes_id = neighb_nodes[i].Id();
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
if (neigh_nodes_id != other_neigh_nodes_id)
{
std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl;
}
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
}
}
}
else
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
unsigned int firstRow = 3;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
}
}
}
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD) = Fgrad;
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL) = FgradVel;
KRATOS_CATCH("");
}
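// Interface variant: the stored SFD neighbour IDs may be ordered differently
// from (or be a subset of) the geometric NEIGHBOUR_NODES list, so every stored
// ID is looked up in that list before its contribution is added. This extra
// inner loop is what makes the routine more expensive than the general one.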
void ComputeAndStoreNodalDeformationGradientForInterfaceNode(ModelPart::NodeIterator itNode, Vector nodalSFDneighboursId, Vector rNodalSFDneigh, double theta, Matrix &Fgrad, Matrix &FgradVel)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
/* unsigned int idThisNode=nodalSFDneighboursId[0]; */
const unsigned int neighSize = nodalSFDneighboursId.size();
noalias(Fgrad) = ZeroMatrix(dimension, dimension);
noalias(FgradVel) = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighNodesSize = neighb_nodes.size();
if (dimension == 2)
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) // neighb_nodes has one entry less than nodalSFDneighboursId because the latter also stores the ID of the considered node at the beginning
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
for (unsigned int k = 0; k < neighNodesSize; k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
break;
}
}
}
}
}
else
{
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
unsigned int firstRow = 3;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++)
{
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
for (unsigned int k = 0; k < neighNodesSize; k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[k].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[k].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[k].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[k].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[k].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
break;
}
}
}
}
}
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=Fgrad;
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=FgradVel;
KRATOS_CATCH("");
}
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
// std::cout<<" UpdateTopology ..."<<std::endl;
/* this->CalculateDisplacements(); */
CalculateDisplacementsAndResetNodalVariables();
BaseType::MoveMesh();
BoundaryNormalsCalculationUtilities BoundaryComputation;
BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);
// std::cout<<" UpdateTopology DONE"<<std::endl;
KRATOS_CATCH("");
}
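// Displacements are advanced with the trapezoidal rule,
// d_{n+1} = d_n + 0.5 * dt * (v_{n+1} + v_n),
// and afterwards every nodal assembly variable (volumes, mesh size, strain
// rates, SFD vectors, deformation gradients) is reset to zero for both the
// fluid and the solid data, ready to be rebuilt on the updated topology.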
void CalculateDisplacementsAndResetNodalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
if (dimension == 3)
{
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
}
///// reset Nodal variables //////
Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
// unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
// unsigned int sizeSDFNeigh=neighbourNodes*dimension;
i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
// if(i->FastGetSolutionStepValue(INTERFACE_NODE)==true){
Vector &rSolidNodalSFDneighbours = i->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
unsigned int solidSizeSDFNeigh = rSolidNodalSFDneighbours.size();
// unsigned int solidSizeSDFNeigh=solidNeighbourNodes*dimension;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rSolidNodalSFDneighbours) = ZeroVector(solidSizeSDFNeigh);
Vector &rSolidSpatialDefRate = i->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rSolidFgrad = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rSolidFgradVel = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
// }
}
// }
}
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "NodalTwoStepVPStrategyForFSI";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
rOStream << "NodalTwoStepVPStrategyForFSI";
}
// /// Print object's data.
// void PrintData(std::ostream& rOStream) const override
// {
// }
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
NodalTwoStepVPStrategyForFSI &operator=(NodalTwoStepVPStrategyForFSI const &rOther) { return *this; }
/// Copy constructor.
NodalTwoStepVPStrategyForFSI(NodalTwoStepVPStrategyForFSI const &rOther) {}
///@}
}; /// Class NodalTwoStepVPStrategyForFSI
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
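/* Typical usage (this mirrors what main() below does):
 *
 *   struct timeval start, end, diff;
 *   gettimeofday(&start, 0);
 *   ... timed region ...
 *   gettimeofday(&end, 0);
 *   timeval_subtract(&diff, &end, &start);
 *   double seconds = diff.tv_sec + diff.tv_usec * 1.0e-6;
 */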
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
// all four sizes are required; otherwise Nx..Nt would be used uninitialized
if (argc < 5) {
printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 4;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
// initialize the full grid, including the boundary planes read by the stencil
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing here was causing performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
agilekeychain_fmt_plug.c | /* 1Password Agile Keychain cracker patch for JtR. Hacked together during
* July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* This software is based on "agilekeychain" project but no actual code is
* borrowed from it.
*
* "agilekeychain" project is at https://bitbucket.org/gwik/agilekeychain
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_agile_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_agile_keychain);
#else
#include <string.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1 // tuned on core i7
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "johnswap.h"
#include "options.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"
#include "jumbo.h"
#include "memdbg.h"
#define FORMAT_LABEL "agilekeychain"
#define FORMAT_NAME "1Password Agile Keychain"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SALTLEN 8
#define IVLEN 8
#define CTLEN 1040
static struct fmt_tests agile_keychain_tests[] = {
{"$agilekeychain$2*1000*8*7146eaa1cca395e5*1040*e7eb81496717d35f12b83024bb055dec00ea82843886cbb8d0d77302a85d89b1d2c0b5b8275dca44c168cba310344be6eea3a79d559d0846a9501f4a012d32b655047673ef66215fc2eb4e944a9856130ee7cd44523017bbbe2957e6a81d1fd128434e7b83b49b8a014a3e413a1d76b109746468070f03f19d361a21c712ef88e05b04f8359f6dd96c1c4487ea2c9df22ea9029e9bc8406d37850a5ead03062283a42218c134d05ba40cddfe46799c931291ec238ee4c11dc71d2b7e018617d4a2bf95a0c3c1f98ea14f886d94ee2a65871418c7c237f1fe52d3e176f8ddab6dfd4bc039b6af36ab1bc9981689c391e71703e31979f732110b84d5fccccf59c918dfcf848fcd80c6da62ced6e231497b9cbef22d5edca439888556bae5e7b05571ac34ea54fafc03fb93e4bc17264e50a1d04b688fcc8bc715dd237086c2537c32de34bbb8a29de0208800af2a9b561551ae6561099beb61045f22dbe871fab5350e40577dd58b4c8fb1232f3f85b8d2e028e5535fd131988a5df4c0408929b8eac6d751dcc698aa1d79603251d90a216ae5e28bffc0610f61fefe0a23148dcc65ab88b117dd3b8d311157424867eb0261b8b8c5b11def85d434dd4c6dc7036822a279a77ec640b28da164bea7abf8b634ba0e4a13d9a31fdcfebbdbe53adcdf2564d656e64923f76bc2619428abdb0056ce20f47f3ece7d4d11dc55d2969684ca336725561cb27ce0504d57c88a2782daccefb7862b385d494ce70fef93d68e673b12a68ba5b8c93702be832d588ac935dbf0a7b332e42d1b6da5f87aed03498a37bb41fc78fcdbe8fe1f999fe756edf3a375beb54dd508ec45af07985f1430a105e552d9817106ae12d09906c4c28af575d270308a950d05c07da348f59571184088d46bbef3e7a2ad03713e90b435547b23f340f0f5d00149838d9919d40dac9b337920c7e577647fe4e2811f05b8e888e3211d9987cf922883aa6e53a756e579f7dff91c297fcc5cda7d10344545f64099cfd2f8fd59ee5c580ca97cf8b17e0222b764df25a2a52b81ee9db41b3c296fcea1203b367e55d321c3504aeda8913b0cae106ccf736991030088d581468264b8486968e868a44172ad904d97e3e52e8370aaf52732e6ee6cc46eb33a901afc6b7c687b8f6ce0b2b4cdfe19c7139615195a052051becf39383ab83699a383a26f8a36c78887fe27ea7588c0ea21a27357ff9923a3d23ca2fb04ad671b63f8a8ec9b7fc969d3bece0f5ff19a40bc327b9905a6de2193ffe3aa1997e9266205d083776e3b94869164abcdb88d64b8ee5465f7165b75e1632abd364a24bb1426889955b8f0354f75c6fb40e254f7de53d8ef7fee9644bf2ebccd934a72bb1cc9c19d354d66996acbddd60d1241657359d9074a4b313b21af2ee4f10cf20f4122a5fad4ee4f37a682ffb7234bea61985d1ad130bfb9f4714461fb574dbf851c*1000*8*c05f3bc3e7f3cad7*1040*f3e3d091b64da1529b04b2795898b717faad59f7dae4bda25e6e267c28a56a7702e51991b2a3fb034cdda2d9bfd531dfd2c3af00f39fdfe8bcbdde02ab790415bcf071d133b15f647f55ff512730ae4914ce20b72184c827f6350ac768b00c9eab0e3322e084bb3e9e9439a10030950f5504dcc4f7ba614b27fde99bd0d743a58341e90ec313395486eb8068df205b7bdf25134ed97dd2e2883d7eb3e63b659602ada765084a69d7ed8fc55b60aa67718cc9e5bf31ab8f3029b32a4b001071848d2b76b5f4b921d2169ca287e9e78ecd904d040c817c7c7cde4ba8510b462e139c16519962ca0adb7d5f89d431cd4541a9a7aaec8d799697f4d3947d87884bed32ada13db725c72ab6450ac8fe989a94917cca784bcf6ffbe756f19d4e8897e0f80d8c318e13e5b30fc356646aaf038a952b0781f12dfef1f4bd6922ae05a573eeff4dbb064cfbb0fd62962a6a53a8de308da2b8e83baebfe261cb127f874a5eff3f05cda123ab2ba559cf444ce33b6845f4c902733b8982044151a8aa1859769082ade5928f2d4f616ce972ae8dde1f2be37d496ad16057008dfe678c75cbdc53db25ed311edbcf8b2a73bcd2809f6bd1d389aaeed82a75fa15676d08aa5390efdc189c180be6a52ec5a7371304d26e477039197671377d1ea3d6ee41e68a42348a4fe9a1d2400eaeba8ed0a7419b9694d780456d96378c00318a5be0f41afa887476b3bebb7cf30d61ca8fc77de35671a3053a517aa39444e01e1752da3146dc97eec5849d6f025c3d4bc6e0499b901f629d8a081ad35ed33602cbef5e9a68f090170fcc1f285eb094e3dc619740a067fd2aeeb20abbb17926c3ad097f3f0bad4de540d1829a985cd7e700100622ec47da046071c11a1597e5f093268b4ed79ffcf2450b9ba2b649b932fbce912bdb4da010581bd9c731be792c8f75177f6c8c4e1756d63a1491a8aae4bb11beeca118e7d08073b500dd82b81
e4bdbeb15625afca8f1c8e06b2360da972587516ef62e91d1d9aad90e62226d53363bff318f5af21f69c234731ac22b09506a1b807d2366e88905668d960c7963daa93046e9a56db1d7a437e9a37aa7a2945197265478b264ec14d383030ef73504fd26d4be9e72ebddb14a00bf6bd66a3adaa1d17cada378a2b0bc852f961af52333f7966f8a60738dfd47e79ce537082f187117ffd31f54f53356b671154dfa245671c4cd054c1a8d303a202fccfae6d3f9e3646838cef38703b5e660b5ce7679f5898d801908f90092dbec335c98e4002041287fe9bfa7d7828a29ab240ec2cedc9fa12cfd7c3ef7b61dad4fbf2ef9c0a904dbde1b3792fb5178607608dc9fc2fbc85addf89fa3df94317e729810b508356b5bb176cdb022afb0ec5eeff4d5081b66733d1be1b54cc4f080bfc33187663b5ab185472b35dc8812e201472e6af376c43ee23aa2db6cd04bddd79b99b0c28c48a5ae", "openwall"},
{"$agilekeychain$1*1000*8*54434b3047723444*1040*316539685a36617546544a61466e35743970356559624464304467394a4a41615459594a6b66454c5462417a7a694b5751474e4748595036344f3945374b414b676b6b7278673658794e63734a316c48656b496a3156346a544c6861797537347032466b4d6b416d31704a6b5063547a44703152544f72696e6e38347732597672774f6476414c70346462595a7678656b6e5958716b7a61746d5874514e575965564735627a437578584e4a573050567939413073306c377a4d726e6d576a6655424455394f4934696c48454f4d536e635567393950686d4171364f76747749446130454c6d74783069704d30456d45374f56736e486a5534667877327a526e52596e55454452393544437042646e6739355938714836584968664c4d7a726a4f63544c6858385141464c71565463664270493761664d633055447879613169456a72664479346438305641417054754775477a475266766c4774543668673848624d31636c37624e73743549634457655375507138535139396c4c39364c4f6f757a43305535586161364b47676a61713971394459526a78744e547459797a6a57715a3575534364487a4430306d4e4e39483277674c733238726463616d4f5146467957374234727252774b6d6161664b6d67414d5854496444665848684c376c6c776d47477a4b57566d5a3646346e775441446f3659745038646d336b6370494d50676742797a41325630716e794833793237494152496477556e4d6c4751497367346672635364486e6e71504f6e6264575953584462586c6e573947347a567163535333366e3253504d65656b45483841544f6952384d6170724471706c4a307863713653707265624f544a4d5139377562454a334b776e4879746a37704e37694557484d69696d436f484973613443754d484b4f51484833545a364654694a6d31783061665536796c444f7257666964397243444f684d305a324c6b75693953716664354b435963703559354978757a64354a755158394136663744435a674e4c73484a7935737a707739724c783077316631637349757a6d696252576244396a537730593143633348385a775734534b646569684f634f4c35323364734b7179625750364b76344a4a56626c4f727069366f575a386432745375684c464e42643173445a6a50745743696e666a4458325058644d57654c596d326f5763516a7951524a566372354d4d58435877765172596b734c59354476455156746d75504830444a4e47624e31524f4d544b4a6b4d675835305a7a56736758794c475057714e78496452725269484c75424f4d6d793550677277727453597045566e304c5642764c5a6732504c7a4e71584c4c67634979637369554a3446497655795a78583547306b365a4e337477786c7961796b4d787463796971596f516fcb3584235d7ecde5f8b7bc2b8f1e9e2e*46c3b75f6e4cf139e92f683f32107271", "123"},
{"$agilekeychain$1*1000*8*7a697868444e7458*1040*773954704874444d4d523043546b44375135544f74675a754532624a45794848305949436e4e724d336c524c39316247426a7843317131614152736d50724c6474586a4d4d445954786c31376d363155437130777a414d36586c7045555457424a5a436a657541456742417961654472745a73576e4b7a7a344d547043567846526655524b4339573631756f3850465a3878306b7176644c4253787071764c58376e716a50674f526d4a4e4b546e3359575175614b304a3964756f756935675a77544f4e6770654855776f79553465786e41364d6376496b7651624762424d62756746796a6753514c37793069783869683773454c533559365946584f545246616d48495730464e634d42466e51367856797a4368517335674a755972434b545944633270764e54775879563542776675386b6e4462506b743138694a756d63447134745361526a32373167366e787375514e346a73574e77796b4b49376d3677653448754c364b5a41514633626e71786130634458544e484a436551386e7679304b786d73346f774a383268665167596b466e39317a307269714434546d4d6173416e344b6a74455a584846526a6659746742504262495958386336755241386c496633417666696d7a5036425745757461736b684574794a5230436d50466d4b536375764674674562315679766a43453077356e614b476d345849395a726b7037626153496b6a66634f355261795157645941487731516f564c6764516d4e3074394b3839526341626f6b6b38324465497068624553646f4177786e6f68347779523338394f4e6561315271635236374d424d695978304b336b4a6966776e74614f4b43483237434b596a6630774e79394a4b7153714a48616b4b364455596a454b31433767786a72303450706d44666373574c5a61324f335852474b756c456b76483349754e3156654f417342324d6f75346d4b78774e43424863566e344c4c6c6c6d4e446b617550415a6f3337764f55484b4156344d4769336267344f4737794c354c5567636a565a6b7369616730383377744d69513431333032305a4a3747794944714d67396a5651444132424e79507a34726d346c333552757a764b6c543073437562534376714f346a5939784a546f683358517348623378716677313231383261685357743236455a6a6b6674365870554642386436574c374430635177347278736a744a6e463530756365684c7779497557366550356936514e704e4863353863437165397163496146794a726555714c623438543235396371416154326c66375276746e3550727453306b7042335961364239586c3359384b464865564e677636537234414e4d6c55583867456376686e43646e6e776a6f656d7152613453725148503462744b4a334565714f6e624a774a65623258552fff2bf0505a0bc88b9cbc9073a74586*a6f6556c971bd3ad40b52751ba025713", ""},
{"$agilekeychain$1*1000*8*7a65613743636950*1040*524a397449393859696b4a576e437763716a574947544a6d306e32474442343355764a7a6948517a45686d7569636631514745347448424e4e6b32564239656a55596f724671547638736d4e66783949504b6f38746b6f49426d4d6b794c7a6d3077327639365a4b515934357774664a477247366b5539486135495863766845714146317458356b725a6a50376f726e55734b3136533756706a4b42516165656a50336e4558616450794f59506f4771347268454730784555485a4f5a4772526a76354f45417470616258375a386436474b366f7653583257335939516d4f5364446a414b674e467a31374f716d73516b3362795776305a414a314f63324d616a6c6472413939443879414c523733794c47467654734d7a6a4734733461674353357a4456527841486233646d446e797448696837377364784344704831784f6a5975666168626b5534796678576c59584d4b3448704a784a4f675a6d7672636b5a4b567071445a345a376648624b55414b7262694972384531336c7a6875725a6f44627571775361774b66417743336230614e4166564954334a6c3477666b4254374f747565394b32667266566d3263416a656c79416c45724b3035504a4e42307a33303632483466664272705765415a4f3552416a36544e5a54415a5976666a4b53675a68493071394a6563426964544a4f564d304a773976394944444339516e564a78587539366974586c4f6132717937354c554b65384b7638585132596832417a5271314e4b5653766d4d50506d3554463762763961554e45695a51436e79504f6e7146617a755231373574455365305446624c636450424a43526a49384b32365967496a734c324e525574526e36714c533065694f536c6c37795a456945476d4a6e327262646942416c485046616e384e4d7869427571777355714e7638305267537752726245696c734d68664b53793836684b39445a716b47546d4b59747176474c6b6a6d52513368796b367a356449706c64385541614236546e426a6b4f64766d33493972763941765a71776345686b734c594a7254446c796f46444b6d557441305a636b414e437245587a63487a30304c50564e4e73694d634d5a6f4f74414534424f53685879374e62545734487a555054774a7056686f6a7453666a664e696d354548345631374c61396862586659666332304e465a5678656a304b4d59586d586547634d67474c6d31794a4b546473474c755a697579625779503259726d6d5248544f6f704b575046556e3438415a48474168396d787136327230367248774e73493439693049794b3765314b4f74547265556c564b6e6d594a5959355a7476334b546f75375a6a676c755a557a39744b54747745583948314a37366e6c6d5a53345079555856696438336876596141617a394438711ee66b990b013609582733309b01df00*444f4656a5ec58e8a75204fb25fd5ae5", "PASSWORD"},
{NULL}
};
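/*
 * Hash layout, as parsed by valid() and get_salt():
 *   $agilekeychain$nkeys*iterations*saltlen*salt(hex)*ctlen*ct(hex)[*...second key record...]
 * valid() accepts nkeys <= 2, saltlen <= SALTLEN (8) and ctlen <= CTLEN (1040);
 * get_salt() only decodes the first key record, which is the only one used in crypt_all().
 */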
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt {
unsigned int nkeys;
unsigned int iterations[2];
unsigned int saltlen[2];
unsigned char salt[2][SALTLEN];
unsigned int ctlen[2];
unsigned char ct[2][CTLEN];
} *cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_align(sizeof(*saved_key),
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
cracked = mem_calloc_align(sizeof(*cracked),
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr;
int ctlen;
int saltlen;
char *p;
if (strncmp(ciphertext, "$agilekeychain$", 15) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 15;
if ((p = strtokm(ctcopy, "*")) == NULL) /* nkeys */
goto err;
if (!isdec(p))
goto err;
if (atoi(p) > 2)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
goto err;
if (!isdec(p))
goto err;
saltlen = atoi(p);
if(saltlen > SALTLEN)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if(strlen(p) != saltlen * 2)
goto err;
if(!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* ct length */
goto err;
if (!isdec(p))
goto err;
ctlen = atoi(p);
if (ctlen > CTLEN)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */
goto err;
if(strlen(p) != ctlen * 2)
goto err;
if(!ishexlc(p))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += 15; /* skip over "$agilekeychain$" */
p = strtokm(ctcopy, "*");
cs.nkeys = atoi(p);
p = strtokm(NULL, "*");
cs.iterations[0] = atoi(p);
p = strtokm(NULL, "*");
cs.saltlen[0] = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.saltlen[0]; i++)
cs.salt[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.ctlen[0] = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.ctlen[0]; i++)
cs.ct[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
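/*
 * Fast-reject check: only the last 16-byte AES-CBC block of the keychain blob
 * is decrypted (using the preceding ciphertext block as IV), then the PKCS#7
 * padding and the implied key size are validated. False positives are possible,
 * which is why the format is registered with FMT_NOT_EXACT.
 */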
static int akcdecrypt(unsigned char *derived_key, unsigned char *data)
{
unsigned char out[CTLEN];
int n, key_size;
AES_KEY akey;
unsigned char iv[16];
memcpy(iv, data + CTLEN - 32, 16);
if (AES_set_decrypt_key(derived_key, 128, &akey) < 0)
fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
AES_cbc_encrypt(data + CTLEN - 16, out + CTLEN - 16, 16, &akey, iv, AES_DECRYPT);
n = check_pkcs_pad(out, CTLEN, 16);
if (n < 0)
return -1;
key_size = n / 8;
if (key_size != 128 && key_size != 192 && key_size != 256)
// "invalid key size"
return -1;
return 0;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_32
unsigned char master[MAX_KEYS_PER_CRYPT][32];
int lens[MAX_KEYS_PER_CRYPT], i;
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = strlen(saved_key[i+index]);
pin[i] = (unsigned char*)saved_key[i+index];
pout[i] = master[i];
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt[0], cur_salt->saltlen[0], cur_salt->iterations[0], pout, 16, 0);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
if(akcdecrypt(master[i], cur_salt->ct[0]) == 0)
cracked[i+index] = 1;
else
cracked[i+index] = 0;
}
#else
unsigned char master[32];
pbkdf2_sha1((unsigned char *)saved_key[index],
strlen(saved_key[index]),
cur_salt->salt[0], cur_salt->saltlen[0],
cur_salt->iterations[0], master, 16, 0);
if(akcdecrypt(master, cur_salt->ct[0]) == 0)
cracked[index] = 1;
else
cracked[index] = 0;
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void agile_keychain_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations[0];
}
struct fmt_main fmt_agile_keychain = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
{
"iteration count",
},
agile_keychain_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
agile_keychain_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
ztrmm.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "ztrmm.fatbin.c"
static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; }
static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; }
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
const void * B, size_t ldb, size_t bi, size_t bj,
size_t m, size_t n, size_t elemSize, CUstream stream) {
CUDA_MEMCPY2D copy = {
bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
m * elemSize, n };
return cuMemcpy2DAsync(&copy, stream);
}
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
size_t m, size_t n, size_t elemSize, CUstream stream) {
CUDA_MEMCPY2D copy = {
bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
m * elemSize, n };
return cuMemcpy2DAsync(&copy, stream);
}
static inline CUresult cuMemcpyDtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
size_t m, size_t n, size_t elemSize, CUstream stream) {
CUDA_MEMCPY2D copy = {
bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
m * elemSize, n };
return cuMemcpy2DAsync(&copy, stream);
}
static const double complex zero = 0.0 + 0.0 * I;
static const double complex one = 1.0 + 0.0 * I;
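/*
 * ztrmm performs the in-place triangular matrix product of BLAS ZTRMM:
 *   B := alpha * op(A) * B  (side == CBlasLeft)   or   B := alpha * B * op(A)  (side == CBlasRight)
 * with op(A) = A, A^T or conj(A)^T and A upper or lower triangular (optionally
 * unit diagonal). ztrmm2 below is the out-of-place variant that writes into X.
 */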
void ztrmm(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
size_t m, size_t n,
double complex alpha, const double complex * restrict A, size_t lda,
double complex * restrict B, size_t ldb) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return;
}
if (m == 0 || n == 0)
return;
if (alpha == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] = zero;
}
return;
}
if (side == CBlasLeft) {
if (trans == CBlasNoTrans) {
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t k = 0; k < m; k++) {
if (B[j * ldb + k] != zero) {
register double complex temp = alpha * B[j * ldb + k];
for (size_t i = 0; i < k; i++)
B[j * ldb + i] += temp * A[k * lda + i];
if (diag == CBlasNonUnit) temp *= A[k * lda + k];
B[j * ldb + k] = temp;
}
}
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
size_t k = m - 1;
do {
if (B[j * ldb + k] != zero) {
register double complex temp = alpha * B[j * ldb + k];
B[j * ldb + k] = temp;
if (diag == CBlasNonUnit) B[j * ldb + k] *= A[k * lda + k];
for (size_t i = k + 1; i < m; i++)
B[j * ldb + i] += temp * A[k * lda + i];
}
} while (k-- > 0);
}
}
}
else {
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
size_t i = m - 1;
do {
register double complex temp = B[j * ldb + i];
if (trans == CBlasTrans) {
if (diag == CBlasNonUnit) temp *= A[i * lda + i];
for (size_t k = 0; k < i; k++)
temp += A[i * lda + k] * B[j * ldb + k];
}
else {
if (diag == CBlasNonUnit) temp *= conj(A[i * lda + i]);
for (size_t k = 0; k < i; k++)
temp += conj(A[i * lda + k]) * B[j * ldb + k];
}
B[j * ldb + i] = alpha * temp;
} while (i-- > 0);
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++) {
register double complex temp = B[j * ldb + i];
if (trans == CBlasTrans) {
if (diag == CBlasNonUnit) temp *= A[i * lda + i];
for (size_t k = i + 1; k < m; k++)
temp += A[i * lda + k] * B[j * ldb + k];
}
else {
if (diag == CBlasNonUnit) temp *= conj(A[i * lda + i]);
for (size_t k = i + 1; k < m; k++)
temp += conj(A[i * lda + k]) * B[j * ldb + k];
}
B[j * ldb + i] = alpha * temp;
}
}
}
}
}
else {
if (trans == CBlasNoTrans) {
if (uplo == CBlasUpper) {
size_t j = n - 1;
do {
register double complex temp = alpha;
if (diag == CBlasNonUnit) temp *= A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
for (size_t k = 0; k < j; k++) {
if (A[j * lda + k] != zero) {
register double complex temp = alpha * A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] += temp * B[k * ldb + i];
}
}
} while (j-- > 0);
}
else {
for (size_t j = 0; j < n; j++) {
register double complex temp = alpha;
if (diag == CBlasNonUnit) temp *= A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
for (size_t k = j + 1; k < n; k++) {
if (A[j * lda + k] != zero) {
register double complex temp = alpha * A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] += temp * B[k * ldb + i];
}
}
}
}
}
else {
if (uplo == CBlasUpper) {
for (size_t k = 0; k < n; k++) {
for (size_t j = 0; j < k; j++) {
if (A[k * lda + j] != zero) {
register double complex temp;
if (trans == CBlasTrans)
temp = alpha * A[k * lda + j];
else
temp = alpha * conj(A[k * lda + j]);
for (size_t i = 0; i < m; i++)
B[j * ldb + i] += temp * B[k * ldb + i];
}
}
register double complex temp = alpha;
if (diag == CBlasNonUnit)
temp *= ((trans == CBlasTrans) ? A[k * lda + k] : conj(A[k * lda + k]));
if (temp != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] = temp * B[k * ldb + i];
}
}
}
else {
size_t k = n - 1;
do {
for (size_t j = k + 1; j < n; j++) {
if (A[k * lda + j] != zero) {
register double complex temp;
if (trans == CBlasTrans)
temp = alpha * A[k * lda + j];
else
temp = alpha * conj(A[k * lda + j]);
for (size_t i = 0; i < m; i++)
B[j * ldb + i] += temp * B[k * ldb + i];
}
}
register double complex temp = alpha;
if (diag == CBlasNonUnit)
temp *= ((trans == CBlasTrans) ? A[k * lda + k] : conj(A[k * lda + k]));
if (temp != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] = temp * B[k * ldb + i];
}
} while (k-- > 0);
}
}
}
}
void ztrmm2(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
size_t m, size_t n,
double complex alpha, const double complex * restrict A, size_t lda,
const double complex * restrict B, size_t ldb,
double complex * restrict X, size_t ldx) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
else if (ldx < m)
info = 13;
if (info != 0) {
XERBLA(info);
return;
}
if (m == 0 || n == 0)
return;
if (alpha == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++)
X[j * ldx + i] = zero;
}
return;
}
if (side == CBlasLeft) {
if (trans == CBlasNoTrans) {
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t k = 0; k < m; k++) {
register double complex temp = B[j * ldb + k];
if (temp != zero) {
temp *= alpha;
for (size_t i = 0; i < k; i++)
X[j * ldx + i] += temp * A[k * lda + i];
if (diag == CBlasNonUnit) temp *= A[k * lda + k];
}
X[j * ldx + k] = temp;
}
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
size_t k = m - 1;
do {
if (B[j * ldb + k] != zero) {
register double complex temp = alpha * B[j * ldb + k];
X[j * ldx + k] = temp;
if (diag == CBlasNonUnit) X[j * ldx + k] *= A[k * lda + k];
for (size_t i = k + 1; i < m; i++)
X[j * ldx + i] += temp * A[k * lda + i];
}
else
X[j * ldx + k] = B[j * ldb + k];
} while (k-- > 0);
}
}
}
else {
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
size_t i = m - 1;
do {
register double complex temp = B[j * ldb + i];
if (trans == CBlasTrans) {
if (diag == CBlasNonUnit) temp *= A[i * lda + i];
for (size_t k = 0; k < i; k++)
temp += A[i * lda + k] * B[j * ldb + k];
}
else {
if (diag == CBlasNonUnit) temp *= conj(A[i * lda + i]);
for (size_t k = 0; k < i; k++)
temp += conj(A[i * lda + k]) * B[j * ldb + k];
}
X[j * ldx + i] = alpha * temp;
} while (i-- > 0);
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++) {
register double complex temp = B[j * ldb + i];
if (trans == CBlasTrans) {
if (diag == CBlasNonUnit) temp *= A[i * lda + i];
for (size_t k = i + 1; k < m; k++)
temp += A[i * lda + k] * B[j * ldb + k];
}
else {
if (diag == CBlasNonUnit) temp *= conj(A[i * lda + i]);
for (size_t k = i + 1; k < m; k++)
temp += conj(A[i * lda + k]) * B[j * ldb + k];
}
X[j * ldx + i] = alpha * temp;
}
}
}
}
}
else {
if (trans == CBlasNoTrans) {
if (uplo == CBlasUpper) {
size_t j = n - 1;
do {
register double complex temp = alpha;
if (diag == CBlasNonUnit) temp *= A[j * lda + j];
for (size_t i = 0; i < m; i++)
X[j * ldx + i] = temp * B[j * ldb + i];
for (size_t k = 0; k < j; k++) {
if (A[j * lda + k] != zero) {
register double complex temp = alpha * A[j * lda + k];
for (size_t i = 0; i < m; i++)
X[j * ldx + i] += temp * B[k * ldb + i];
}
}
} while (j-- > 0);
}
else {
for (size_t j = 0; j < n; j++) {
register double complex temp = alpha;
if (diag == CBlasNonUnit) temp *= A[j * lda + j];
for (size_t i = 0; i < m; i++)
X[j * ldx + i] = temp * B[j * ldb + i];
for (size_t k = j + 1; k < n; k++) {
if (A[j * lda + k] != zero) {
register double complex temp = alpha * A[j * lda + k];
for (size_t i = 0; i < m; i++)
X[j * ldx + i] += temp * B[k * ldb + i];
}
}
}
}
}
else {
if (uplo == CBlasUpper) {
for (size_t k = 0; k < n; k++) {
for (size_t j = 0; j < k; j++) {
if (A[k * lda + j] != zero) {
register double complex temp;
if (trans == CBlasTrans)
temp = alpha * A[k * lda + j];
else
temp = alpha * conj(A[k * lda + j]);
for (size_t i = 0; i < m; i++)
X[j * ldx + i] += temp * B[k * ldb + i];
}
}
register double complex temp = alpha;
if (diag == CBlasNonUnit)
temp *= ((trans == CBlasTrans) ? A[k * lda + k] : conj(A[k * lda + k]));
if (temp != one) {
for (size_t i = 0; i < m; i++)
X[k * ldx + i] = temp * B[k * ldb + i];
}
}
}
else {
size_t k = n - 1;
do {
for (size_t j = k + 1; j < n; j++) {
if (A[k * lda + j] != zero) {
register double complex temp;
if (trans == CBlasTrans)
temp = alpha * A[k * lda + j];
else
temp = alpha * conj(A[k * lda + j]);
for (size_t i = 0; i < m; i++)
X[j * ldx + i] += temp * B[k * ldb + i];
}
}
register double complex temp = alpha;
if (diag == CBlasNonUnit)
temp *= ((trans == CBlasTrans) ? A[k * lda + k] : conj(A[k * lda + k]));
if (temp != one) {
for (size_t i = 0; i < m; i++)
X[k * ldx + i] = temp * B[k * ldb + i];
}
} while (k-- > 0);
}
}
}
}
CUresult cuZtrmm2(CUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
size_t m, size_t n,
double complex alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
CUdeviceptr X, size_t ldx, CUstream stream) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
else if (ldx < m)
info = 13;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));
if (handle->ztrmm2 == NULL)
CU_ERROR_CHECK(cuModuleLoadData(&handle->ztrmm2, imageBytes));
const unsigned int mb = (trans == CBlasNoTrans) ? 64 : 8;
const unsigned int nb = (trans == CBlasNoTrans) ? 4 : 8;
const unsigned int kb = (trans == CBlasNoTrans) ? 16 : 4;
const unsigned int bx = (trans == CBlasNoTrans) ? 16 : 4;
const unsigned int by = (trans == CBlasNoTrans) ? 4 : 8;
char name[95];
if (trans == CBlasNoTrans)
snprintf(name, 78,
"_Z8ztrmm%c%c%cIL9CBlasDiag%dELj%uELj%uELj%uELj%uELj%uEEv7double2PKS1_S3_PS1_iiiii",
side, uplo, trans, diag, mb, nb, kb, bx, by);
else
snprintf(name, 95,
"_Z8ztrmm%c%cTIL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uELj%uEEv7double2PKS2_S4_PS2_iiiii",
side, uplo, trans, diag, mb, nb, kb, bx, by);
CUfunction function;
CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->ztrmm2, name));
void * params[] = { &alpha, &A, &B, &X, &lda, &ldb, &ldx, &m, &n };
CU_ERROR_CHECK(cuLaunchKernel(function,
(unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1,
bx, by, 1,
0, stream, params, NULL));
CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));
return CUDA_SUCCESS;
}
CUresult cuZtrmm(CUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
size_t m, size_t n,
double complex alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
CUstream stream) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));
CUdeviceptr X;
size_t ldx;
CU_ERROR_CHECK(cuMemAllocPitch(&X, &ldx, m * sizeof(double complex), n, sizeof(double complex)));
ldx /= sizeof(double complex);
CU_ERROR_CHECK(cuZtrmm2(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, X, ldx, stream));
CU_ERROR_CHECK(cuMemcpyDtoD2DAsync(B, ldb, 0, 0, X, ldx, 0, 0, m, n, sizeof(double complex), stream));
CU_ERROR_CHECK(cuMemFree(X));
CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));
return CUDA_SUCCESS;
}
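/*
 * Multi-GPU variant: B is processed in panels of mb rows (left side) or nb
 * columns (right side). For each panel, the contribution of the off-diagonal
 * part of A is applied through a multi-GPU ZGEMM update, and the remaining
 * diagonal triangular block is then applied on the host with the CPU ztrmm above.
 */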
CUresult cuMultiGPUZtrmm(CUmultiGPUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
size_t m, size_t n,
double complex alpha, const double complex * restrict A, size_t lda,
double complex * restrict B, size_t ldb) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
if (alpha == zero) {
zgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb);
return CUDA_SUCCESS;
}
const size_t mb = (trans == CBlasNoTrans) ? ZGEMM_N_MB : ZGEMM_CN_MB;
const size_t nb = ZGEMM_N_NB;
if (m <= mb || n <= nb) {
ztrmm(side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
return CUDA_SUCCESS;
}
if (side == CBlasLeft) {
if (trans == CBlasNoTrans) {
if (uplo == CBlasUpper) {
size_t i = (m + mb - 1) & ~(mb - 1);
do {
i -= mb;
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ztrmm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
} while (i > 0);
}
else {
for (size_t i = 0; i < m; i += mb) {
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ztrmm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
}
}
}
else {
if (uplo == CBlasUpper) {
for (size_t i = 0; i < m; i += mb) {
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, trans, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ztrmm(CBlasLeft, CBlasUpper, trans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
}
}
else {
size_t i = (m + mb - 1) & ~(mb - 1);
do {
i -= mb;
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, trans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ztrmm(CBlasLeft, CBlasLower, trans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
} while (i > 0);
}
}
}
else {
if (trans == CBlasNoTrans) {
if (uplo == CBlasUpper) {
for (size_t j = 0; j < n; j += nb) {
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ztrmm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
}
}
else {
size_t j = (n + nb - 1) & ~(nb - 1);
do {
j -= nb;
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ztrmm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
} while (j > 0);
}
}
else {
if (uplo == CBlasUpper) {
size_t j = (n + nb - 1) & ~(nb - 1);
do {
j -= nb;
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, trans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ztrmm(CBlasRight, CBlasUpper, trans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
} while (j > 0);
}
else {
for (size_t j = 0; j < n; j += nb) {
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUZgemm(handle, CBlasNoTrans, trans, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
ztrmm(CBlasRight, CBlasLower, trans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
}
}
}
}
return CUDA_SUCCESS;
}
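/* Editor's sketch (not part of the original source): a freestanding C++
program that exercises the block-partitioning arithmetic used by the
descending loops above, so the "(m + mb - 1) & ~(mb - 1)" idiom can be
checked in isolation. The sizes chosen here are arbitrary. */
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t m = 37, mb = 8;            // hypothetical matrix/block sizes
  std::size_t i = (m + mb - 1) & ~(mb - 1);    // round m up to a multiple of mb
  do {
    i -= mb;
    const std::size_t ib = std::min(mb, m - i); // topmost block may be short
    std::printf("block rows [%zu, %zu)\n", i, i + ib);
  } while (i > 0);
  return 0;
}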
|
coding_transform.h | // Copyright (c) 2020, Tobias Rapp
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Karlsruhe Institute of Technology nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef MRAY_CODING_TRANSFORM_H
#define MRAY_CODING_TRANSFORM_H
#include "moment_image.h"
#include <algorithm>
/**
* Determine parameters for the coding transform, i.e. the min/max values per index to transform the moments.
*
* @return For each moment index l >= 1: the absolute value of the minimum,
* |min(l)|, stored at params[(l-1)*2], and the half range |max(l) - min(l)| / 2,
* stored at params[(l-1)*2 + 1].
*/
inline vector<float> find_coding_transform_parameters(const MomentImageHost &img)
{
vector<float> params((img.num_moments - 1) * 2, 0.0f);
#pragma omp parallel for default(none), shared(img, params)
for (int l = 1; l < img.num_moments; ++l)
{
for (int y = 0; y < img.height; ++y)
{
for (int x = 0; x < img.width; ++x)
{
auto num_moments = img.get_num_moments(x, y);
if (num_moments <= l) // moment index l is only valid when l < num_moments
continue;
auto idx = img.get_idx(x, y);
auto value = cut::clamp(img.data[idx + l], -1.0f, 1.0f);
params[(l - 1) * 2] = std::min(params[(l - 1) * 2], value);
params[(l - 1) * 2 + 1] = std::max(params[(l - 1) * 2 + 1], value);
}
}
}
for (int pidx = 0; pidx < img.num_moments - 1; ++pidx)
{
params[pidx * 2 + 1] = cut::abs(params[pidx * 2 + 1] - params[pidx * 2]) * 0.5f;
params[pidx * 2] = cut::abs(params[pidx * 2]);
}
return params;
}
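/* Editor's sketch (hypothetical helper, not part of this header): one way the
parameters above could be applied. With offset = |min(l)| and
half_range = |max(l) - min(l)| / 2, a moment value within [min, max] maps
into [0, 1] via (value + offset) / (2 * half_range). */
inline float apply_coding_transform(float value, float offset, float half_range)
{
    if (half_range <= 0.0f)
        return 0.0f; // degenerate range: the moment is constant
    return (value + offset) / (2.0f * half_range);
}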
#endif // MRAY_CODING_TRANSFORM_H
|
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987-2018 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "tm.h"
#include "hard-reg-set.h"
#include "function.h"
/* In order for the format checking to accept the C++ front end
diagnostic framework extensions, you must include this file before
diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
#error \
In order for the format checking to accept the C++ front end diagnostic \
framework extensions, you must include this file before diagnostic-core.h and \
c-common.h, not after.
#endif
#include "c-family/c-common.h"
#include "diagnostic.h"
/* A tree node, together with a location, so that we can track locations
(and ranges) during parsing.
The location is redundant for node kinds that have locations,
but not all node kinds do (e.g. constants, and references to
params, locals, etc), so we stash a copy here. */
class cp_expr
{
public:
cp_expr () :
m_value (NULL), m_loc (UNKNOWN_LOCATION) {}
cp_expr (tree value) :
m_value (value), m_loc (EXPR_LOCATION (m_value)) {}
cp_expr (tree value, location_t loc):
m_value (value), m_loc (loc) {}
cp_expr (const cp_expr &other) :
m_value (other.m_value), m_loc (other.m_loc) {}
/* Implicit conversions to tree. */
operator tree () const { return m_value; }
tree & operator* () { return m_value; }
tree operator* () const { return m_value; }
tree & operator-> () { return m_value; }
tree operator-> () const { return m_value; }
tree get_value () const { return m_value; }
location_t get_location () const { return m_loc; }
location_t get_start () const
{
source_range src_range = get_range_from_loc (line_table, m_loc);
return src_range.m_start;
}
location_t get_finish () const
{
source_range src_range = get_range_from_loc (line_table, m_loc);
return src_range.m_finish;
}
void set_location (location_t loc)
{
protected_set_expr_location (m_value, loc);
m_loc = loc;
}
void set_range (location_t start, location_t finish)
{
set_location (make_location (m_loc, start, finish));
}
cp_expr& maybe_add_location_wrapper ()
{
m_value = maybe_wrap_with_location (m_value, m_loc);
return *this;
}
private:
tree m_value;
location_t m_loc;
};
inline bool
operator == (const cp_expr &lhs, tree rhs)
{
return lhs.get_value () == rhs;
}
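/* Editor's sketch (invented names, independent of GCC internals): a
freestanding analogue of the wrapper pattern above. The payload converts
implicitly to its underlying type while a location rides along, which is
the point of stashing m_loc even for node kinds that carry no location. */
#include <cassert>

struct expr_demo
{
  int value;      /* stand-in for the tree payload */
  unsigned loc;   /* stand-in for location_t */
  expr_demo (int v, unsigned l) : value (v), loc (l) {}
  operator int () const { return value; } /* implicit conversion, as cp_expr -> tree */
};

int
main ()
{
  expr_demo e (42, 7);
  int plain = e;  /* usable wherever the raw payload is expected */
  assert (plain == 42 && e.loc == 7);
  return 0;
}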
enum cp_tree_index
{
CPTI_WCHAR_DECL,
CPTI_VTABLE_ENTRY_TYPE,
CPTI_DELTA_TYPE,
CPTI_VTABLE_INDEX_TYPE,
CPTI_CLEANUP_TYPE,
CPTI_VTT_PARM_TYPE,
CPTI_CLASS_TYPE,
CPTI_UNKNOWN_TYPE,
CPTI_INIT_LIST_TYPE,
CPTI_VTBL_TYPE,
CPTI_VTBL_PTR_TYPE,
CPTI_STD,
CPTI_ABI,
CPTI_GLOBAL,
CPTI_GLOBAL_TYPE,
CPTI_CONST_TYPE_INFO_TYPE,
CPTI_TYPE_INFO_PTR_TYPE,
CPTI_ABORT_FNDECL,
CPTI_AGGR_TAG,
CPTI_CONV_OP_MARKER,
CPTI_CTOR_IDENTIFIER,
CPTI_COMPLETE_CTOR_IDENTIFIER,
CPTI_BASE_CTOR_IDENTIFIER,
CPTI_DTOR_IDENTIFIER,
CPTI_COMPLETE_DTOR_IDENTIFIER,
CPTI_BASE_DTOR_IDENTIFIER,
CPTI_DELETING_DTOR_IDENTIFIER,
CPTI_CONV_OP_IDENTIFIER,
CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER,
CPTI_VTT_PARM_IDENTIFIER,
CPTI_THIS_IDENTIFIER,
CPTI_PFN_IDENTIFIER,
CPTI_VPTR_IDENTIFIER,
CPTI_GLOBAL_IDENTIFIER,
CPTI_STD_IDENTIFIER,
CPTI_ANON_IDENTIFIER,
CPTI_AUTO_IDENTIFIER,
CPTI_DECLTYPE_AUTO_IDENTIFIER,
CPTI_INIT_LIST_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
CPTI_EMPTY_EXCEPT_SPEC,
CPTI_NOEXCEPT_TRUE_SPEC,
CPTI_NOEXCEPT_FALSE_SPEC,
CPTI_NOEXCEPT_DEFERRED_SPEC,
CPTI_TERMINATE_FN,
CPTI_CALL_UNEXPECTED_FN,
CPTI_GET_EXCEPTION_PTR_FN,
CPTI_BEGIN_CATCH_FN,
CPTI_END_CATCH_FN,
CPTI_ALLOCATE_EXCEPTION_FN,
CPTI_FREE_EXCEPTION_FN,
CPTI_THROW_FN,
CPTI_RETHROW_FN,
CPTI_ATEXIT_FN_PTR_TYPE,
CPTI_ATEXIT,
CPTI_DSO_HANDLE,
CPTI_DCAST,
CPTI_NULLPTR,
CPTI_NULLPTR_TYPE,
CPTI_ALIGN_TYPE,
CPTI_ANY_TARG,
CPTI_MAX
};
extern GTY(()) tree cp_global_trees[CPTI_MAX];
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define global_namespace cp_global_trees[CPTI_GLOBAL]
#define global_type_node cp_global_trees[CPTI_GLOBAL_TYPE]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define conv_op_marker cp_global_trees[CPTI_CONV_OP_MARKER]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
/* std::align_val_t */
#define align_type_node cp_global_trees[CPTI_ALIGN_TYPE]
/* We cache these tree nodes so as to call get_identifier less frequently.
For identifiers for functions, including special member functions such
as ctors and assignment operators, the nodes can be used (among other
things) to iterate over their overloads defined by/for a type. For
example:
tree ovlid = assign_op_identifier;
tree overloads = get_class_binding (type, ovlid);
for (ovl_iterator it (overloads); it; ++it) { ... }
iterates over the set of implicitly and explicitly defined overloads
of the assignment operator for type (including the copy and move
assignment operators, whether deleted or not). */
/* The name of a constructor that takes an in-charge parameter to
decide whether or not to construct virtual base classes. */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes. */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes. */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
decide whether or not to destroy virtual base classes and whether
or not to delete the object. */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes. */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
classes. */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
then deletes the entire object. */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
#define ovl_op_identifier(ISASS, CODE) (OVL_OP_INFO(ISASS, CODE)->identifier)
#define assign_op_identifier (ovl_op_info[true][OVL_OP_NOP_EXPR].identifier)
#define call_op_identifier (ovl_op_info[false][OVL_OP_CALL_EXPR].identifier)
/* The name used for conversion operators -- but note that actual
conversion functions use special identifiers outside the identifier
table. */
#define conv_op_identifier cp_global_trees[CPTI_CONV_OP_IDENTIFIER]
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
for this subobject constructor or destructor. */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the ::, std & anon namespaces. */
#define global_identifier cp_global_trees[CPTI_GLOBAL_IDENTIFIER]
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
#define anon_identifier cp_global_trees[CPTI_ANON_IDENTIFIER]
/* auto and decltype(auto) identifiers. */
#define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER]
#define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER]
#define init_list_identifier cp_global_trees[CPTI_INIT_LIST_IDENTIFIER]
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
/* Exception specifiers used for throw(), noexcept(true),
noexcept(false) and deferred noexcept. We rely on these being
uncloned. */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
#define noexcept_deferred_spec cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC]
/* Exception handling function declarations. */
#define terminate_fn cp_global_trees[CPTI_TERMINATE_FN]
#define call_unexpected_fn cp_global_trees[CPTI_CALL_UNEXPECTED_FN]
#define get_exception_ptr_fn cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN]
#define begin_catch_fn cp_global_trees[CPTI_BEGIN_CATCH_FN]
#define end_catch_fn cp_global_trees[CPTI_END_CATCH_FN]
#define allocate_exception_fn cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN]
#define free_exception_fn cp_global_trees[CPTI_FREE_EXCEPTION_FN]
#define throw_fn cp_global_trees[CPTI_THROW_FN]
#define rethrow_fn cp_global_trees[CPTI_RETHROW_FN]
/* The type of the function-pointer argument to "__cxa_atexit" (or
"std::atexit", if "__cxa_atexit" is not being used). */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
/* A pointer to `std::atexit'. */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'. */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime. */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor. */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
destructors. */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A node which matches any template argument. */
#define any_targ_node cp_global_trees[CPTI_ANY_TARG]
/* Node to indicate default access. This must be distinct from the
access nodes in tree.h. */
#define access_default_node null_node
#include "name-lookup.h"
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_KIND_BIT_0 (in IDENTIFIER_NODE)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
COND_EXPR_IS_VEC_DELETE (in COND_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
PAREN_STRING_LITERAL (in STRING_CST)
CP_DECL_THREAD_LOCAL_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE,
and OMP_TASKLOOP)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag)
LAMBDA_CAPTURE_EXPLICIT_P (in a TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST)
CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO)
SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ)
WILDCARD_PACK_P (in WILDCARD_DECL)
BLOCK_OUTER_CURLY_BRACE_P (in BLOCK)
FOLD_EXPR_MODOP_P (*_FOLD_EXPR)
IF_STMT_CONSTEXPR_P (IF_STMT)
TEMPLATE_TYPE_PARM_FOR_CLASS (TEMPLATE_TYPE_PARM)
DECL_NAMESPACE_INLINE_P (in NAMESPACE_DECL)
SWITCH_STMT_ALL_CASES_P (in SWITCH_STMT)
REINTERPRET_CAST_P (in NOP_EXPR)
ALIGNOF_EXPR_STD_P (in ALIGNOF_EXPR)
1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE)
CONSTRUCTOR_NO_IMPLICIT_ZERO (in CONSTRUCTOR)
TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO)
PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION)
OVL_USING_P (in OVERLOAD)
IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR)
2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
FNDECL_USED_AUTO (in FUNCTION_DECL)
DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF)
AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR)
OVL_HIDDEN_P (in OVERLOAD)
SWITCH_STMT_NO_BREAK_P (in SWITCH_STMT)
LAMBDA_EXPR_CAPTURE_OPTIMIZED (in LAMBDA_EXPR)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NONTRIVIALLY_INITIALIZED_P (in VAR_DECL)
CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE)
CONSTRUCTOR_C99_COMPOUND_LITERAL (in CONSTRUCTOR)
OVL_NESTED_P (in OVERLOAD)
4: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
CALL_EXPR, or FIELD_DECL).
DECL_TINFO_P (in VAR_DECL)
FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
OVL_LOOKUP_P (in OVERLOAD)
LOOKUP_FOUND_P (in RECORD_TYPE, UNION_TYPE, NAMESPACE_DECL)
5: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
CONSTRUCTOR_PLACEHOLDER_BOUNDARY (in CONSTRUCTOR)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_USER_CONSTRUCTOR.
2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE)
TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE)
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM)
REFERENCE_VLA_OK (in REFERENCE_TYPE)
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
LABEL_DECL_BREAK (in LABEL_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
USING_DECL_TYPENAME_P (in USING_DECL)
DECL_VLA_CAPTURE_P (in FIELD_DECL)
DECL_ARRAY_PARAMETER_P (in PARM_DECL)
LABEL_DECL_CONTINUE (in LABEL_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
DECL_CONSTRAINT_VAR_P (in a PARM_DECL)
TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL)
DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL)
LABEL_DECL_CDTOR (in LABEL_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
TYPE_DECL_ALIAS_P (in TYPE_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS.
For a POINTER_TYPE (to a METHOD_TYPE), this is TYPE_PTRMEMFUNC_TYPE.
For an ENUMERAL_TYPE, BOUND_TEMPLATE_TEMPLATE_PARM_TYPE,
RECORD_TYPE or UNION_TYPE this is TYPE_TEMPLATE_INFO.
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
If BV_LOST_PRIMARY is set, it means that this entry is for a lost
primary virtual base and can be left null in the vtable.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers. */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == FUNCTION_DECL)
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ __typeof (NODE) const __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->u.fn.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier. */
struct GTY(()) lang_identifier {
struct c_common_identifier c_common;
cxx_binding *bindings;
};
/* Return a typed pointer version of T if it designates a
C++ front-end identifier. */
inline lang_identifier*
identifier_p (tree t)
{
if (TREE_CODE (t) == IDENTIFIER_NODE)
return (lang_identifier*) t;
return NULL;
}
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
struct GTY(()) template_parm_index {
struct tree_common common;
int index;
int level;
int orig_level;
tree decl;
};
struct GTY(()) ptrmem_cst {
struct tree_common common;
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
|| LAMBDA_FUNCTION_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Mark the outer curly brace BLOCK. */
#define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
#define COND_EXPR_IS_VEC_DELETE(NODE) \
TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE))
/* Nonzero if this NOP_EXPR is a reinterpret_cast. Such conversions
are not constexprs. Other NOP_EXPRs are. */
#define REINTERPRET_CAST_P(NODE) \
TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)) \
&& flag_hosted)
/* Lookup walker marking. */
#define LOOKUP_SEEN_P(NODE) TREE_VISITED(NODE)
#define LOOKUP_FOUND_P(NODE) \
TREE_LANG_FLAG_4 (TREE_CHECK3(NODE,RECORD_TYPE,UNION_TYPE,NAMESPACE_DECL))
/* These two accessors should only be used by OVL manipulators.
Other users should use iterators and convenience functions. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->common.chain)
/* If set, this was imported in a using declaration. */
#define OVL_USING_P(NODE) TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE))
/* If set, this overload is a hidden decl. */
#define OVL_HIDDEN_P(NODE) TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE))
/* If set, this overload contains a nested overload. */
#define OVL_NESTED_P(NODE) TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE))
/* If set, this overload was constructed during lookup. */
#define OVL_LOOKUP_P(NODE) TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE))
/* If set, this is a persistent lookup. */
#define OVL_USED_P(NODE) TREE_USED (OVERLOAD_CHECK (NODE))
/* The first decl of an overload. */
#define OVL_FIRST(NODE) ovl_first (NODE)
/* The name of the overload set. */
#define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE))
/* Whether this is a set of overloaded functions. TEMPLATE_DECLS are
always wrapped in an OVERLOAD, so we don't need to check them
here. */
#define OVL_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD)
/* Whether this is a single member overload. */
#define OVL_SINGLE_P(NODE) \
(TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE))
/* OVL_HIDDEN_P nodes come first, then OVL_USING_P nodes, then regular
fns. */
struct GTY(()) tree_overload {
struct tree_common common;
tree function;
};
/* Iterator for a 1 dimensional overload. Permits iterating over the
outer level of a 2-d overload when explicitly enabled. */
class ovl_iterator
{
tree ovl;
const bool allow_inner; /* Only used when checking. */
public:
explicit ovl_iterator (tree o, bool allow = false)
: ovl (o), allow_inner (allow)
{
}
private:
/* Do not duplicate. */
ovl_iterator &operator= (const ovl_iterator &);
ovl_iterator (const ovl_iterator &);
public:
operator bool () const
{
return ovl;
}
ovl_iterator &operator++ ()
{
ovl = TREE_CODE (ovl) != OVERLOAD ? NULL_TREE : OVL_CHAIN (ovl);
return *this;
}
tree operator* () const
{
tree fn = TREE_CODE (ovl) != OVERLOAD ? ovl : OVL_FUNCTION (ovl);
/* Check this is not an unexpected 2-dimensional overload. */
gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD);
return fn;
}
public:
/* Whether this overload was introduced by a using decl. */
bool using_p () const
{
return TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl);
}
bool hidden_p () const
{
return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl);
}
public:
tree remove_node (tree head)
{
return remove_node (head, ovl);
}
tree reveal_node (tree head)
{
return reveal_node (head, ovl);
}
protected:
/* If we have a nested overload, point at the inner overload and
return the next link on the outer one. */
tree maybe_push ()
{
tree r = NULL_TREE;
if (ovl && TREE_CODE (ovl) == OVERLOAD && OVL_NESTED_P (ovl))
{
r = OVL_CHAIN (ovl);
ovl = OVL_FUNCTION (ovl);
}
return r;
}
/* Restore an outer nested overload. */
void pop (tree outer)
{
gcc_checking_assert (!ovl);
ovl = outer;
}
private:
/* We make these static functions to avoid the address of the
iterator escaping the local context. */
static tree remove_node (tree head, tree node);
static tree reveal_node (tree ovl, tree node);
};
/* Iterator over a (potentially) 2 dimensional overload, which is
produced by name lookup. */
class lkp_iterator : public ovl_iterator
{
typedef ovl_iterator parent;
tree outer;
public:
explicit lkp_iterator (tree o)
: parent (o, true), outer (maybe_push ())
{
}
public:
lkp_iterator &operator++ ()
{
bool repush = !outer;
if (!parent::operator++ () && !repush)
{
pop (outer);
repush = true;
}
if (repush)
outer = maybe_push ();
return *this;
}
};
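/* Editor's sketch (freestanding, invented names): the 1-dimensional
iteration contract above, modelled on plain structs. A chain is OVERLOAD
nodes ending either in NULL or in a bare trailing function, and a single
function needs no OVERLOAD wrapper at all; a non-OVERLOAD node is
terminal, exactly as ovl_iterator's operator++ and operator* assume. */
#include <cassert>

enum kind_demo { FN_DEMO, OVL_DEMO };
struct tree_demo
{
  kind_demo kind;
  int fn;                 /* the function id; OVL_FUNCTION flattened inline */
  const tree_demo *chain; /* OVL_CHAIN; may point at a bare FN_DEMO node */
};

static const tree_demo *
ovl_next (const tree_demo *p)
{ return p->kind == OVL_DEMO ? p->chain : nullptr; }

int
main ()
{
  tree_demo f3 = {FN_DEMO, 3, nullptr}; /* bare trailing function */
  tree_demo o2 = {OVL_DEMO, 2, &f3};
  tree_demo o1 = {OVL_DEMO, 1, &o2};
  int sum = 0;
  for (const tree_demo *p = &o1; p; p = ovl_next (p))
    sum += p->fn;
  assert (sum == 6);

  tree_demo lone = {FN_DEMO, 42, nullptr}; /* single overload: no wrapper */
  assert (lone.fn == 42 && ovl_next (&lone) == nullptr);
  return 0;
}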
/* hash traits for declarations. Hashes potential overload sets via
DECL_NAME. */
struct named_decl_hash : ggc_remove <tree>
{
typedef tree value_type; /* A DECL or OVERLOAD */
typedef tree compare_type; /* An identifier. */
inline static hashval_t hash (const value_type decl);
inline static bool equal (const value_type existing, compare_type candidate);
static inline void mark_empty (value_type &p) {p = NULL_TREE;}
static inline bool is_empty (value_type p) {return !p;}
/* Nothing is deletable. Everything is insertable. */
static bool is_deleted (value_type) { return false; }
static void mark_deleted (value_type) { gcc_unreachable (); }
};
struct GTY(()) tree_template_decl {
struct tree_decl_common common;
tree arguments;
tree result;
};
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
BASELINK_FUNCTIONS. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* If T is a BASELINK, grab the functions, otherwise just T, which is
expected to already be a (list of) functions. */
#define MAYBE_BASELINK_FUNCTIONS(T) \
(BASELINK_P (T) ? BASELINK_FUNCTIONS (T) : T)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
struct GTY(()) tree_baselink {
struct tree_common common;
tree binfo;
tree functions;
tree access_binfo;
};
/* The different kinds of ids that we encounter. */
enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
};
/* The various kinds of C++0x warnings we encounter. */
enum cpp0x_warn_str
{
/* extended initializer lists */
CPP0X_INITIALIZER_LISTS,
/* explicit conversion operators */
CPP0X_EXPLICIT_CONVERSION,
/* variadic templates */
CPP0X_VARIADIC_TEMPLATES,
/* lambda expressions */
CPP0X_LAMBDA_EXPR,
/* C++0x auto */
CPP0X_AUTO,
/* scoped enums */
CPP0X_SCOPED_ENUMS,
/* defaulted and deleted functions */
CPP0X_DEFAULTED_DELETED,
/* inline namespaces */
CPP0X_INLINE_NAMESPACES,
/* override controls, override/final */
CPP0X_OVERRIDE_CONTROLS,
/* non-static data member initializers */
CPP0X_NSDMI,
/* user defined literals */
CPP0X_USER_DEFINED_LITERALS,
/* delegating constructors */
CPP0X_DELEGATING_CTORS,
/* inheriting constructors */
CPP0X_INHERITING_CTORS,
/* C++11 attributes */
CPP0X_ATTRIBUTES,
/* ref-qualified member functions */
CPP0X_REF_QUALIFIER
};
/* The various kinds of operation used by composite_pointer_type. */
enum composite_pointer_operation
{
/* comparison */
CPO_COMPARISON,
/* conversion */
CPO_CONVERSION,
/* conditional expression */
CPO_CONDITIONAL_EXPR
};
/* Possible cases of expression list used by build_x_compound_expr_from_list. */
enum expr_list_kind {
ELK_INIT, /* initializer */
ELK_MEM_INIT, /* member initializer */
ELK_FUNC_CAST /* functional cast */
};
/* Possible cases of implicit bad rhs conversions. */
enum impl_conv_rhs {
ICR_DEFAULT_ARGUMENT, /* default argument */
ICR_CONVERTING, /* converting */
ICR_INIT, /* initialization */
ICR_ARGPASS, /* argument passing */
ICR_RETURN, /* return */
ICR_ASSIGN /* assignment */
};
/* Possible cases of implicit or explicit bad conversions to void. */
enum impl_conv_void {
ICV_CAST, /* (explicit) conversion to void */
ICV_SECOND_OF_COND, /* second operand of conditional expression */
ICV_THIRD_OF_COND, /* third operand of conditional expression */
ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
ICV_LEFT_OF_COMMA, /* left operand of comma operator */
ICV_STATEMENT, /* statement */
ICV_THIRD_IN_FOR /* for increment expression */
};
/* Possible invalid uses of an abstract class that might not have a
specific associated declaration. */
enum GTY(()) abstract_class_use {
ACU_UNKNOWN, /* unknown or decl provided */
ACU_CAST, /* cast to abstract class */
ACU_NEW, /* new-expression of abstract class */
ACU_THROW, /* throw-expression of abstract class */
ACU_CATCH, /* catch-parameter of abstract class */
ACU_ARRAY, /* array of abstract class */
ACU_RETURN, /* return type of abstract class */
ACU_PARM /* parameter type of abstract class */
};
/* Macros for access to language-specific slots in an identifier. */
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. Its PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
/* TREE_TYPE indicates the current type only at local and class scope.
At namespace scope, the presence of a type in any namespace is
indicated with global_type_node, and the real type behind it must
be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
/* Kinds of identifiers. Values are carefully chosen. */
enum cp_identifier_kind {
cik_normal = 0, /* Not a special identifier. */
cik_keyword = 1, /* A keyword. */
cik_ctor = 2, /* Constructor (in-chg, complete or base). */
cik_dtor = 3, /* Destructor (in-chg, deleting, complete or
base). */
cik_simple_op = 4, /* Non-assignment operator name. */
cik_assign_op = 5, /* An assignment operator name. */
cik_conv_op = 6, /* Conversion operator name. */
cik_reserved_for_udlit = 7, /* Not yet in use */
cik_max
};
/* Kind bits. */
#define IDENTIFIER_KIND_BIT_0(NODE) \
TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE))
#define IDENTIFIER_KIND_BIT_1(NODE) \
TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE))
#define IDENTIFIER_KIND_BIT_2(NODE) \
TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE))
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) \
TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE))
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) \
TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE))
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (NAME)))
/* True if this identifier is a reserved word. C_RID_CODE (node) is
then the RID_* value of the keyword. Value 1. */
#define IDENTIFIER_KEYWORD_P(NODE) \
((!IDENTIFIER_KIND_BIT_2 (NODE)) \
& (!IDENTIFIER_KIND_BIT_1 (NODE)) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is the name of a constructor or
destructor. Value 2 or 3. */
#define IDENTIFIER_CDTOR_P(NODE) \
((!IDENTIFIER_KIND_BIT_2 (NODE)) \
& IDENTIFIER_KIND_BIT_1 (NODE))
/* True if this identifier is the name of a constructor. Value 2. */
#define IDENTIFIER_CTOR_P(NODE) \
(IDENTIFIER_CDTOR_P(NODE) \
& (!IDENTIFIER_KIND_BIT_0 (NODE)))
/* True if this identifier is the name of a destructor. Value 3. */
#define IDENTIFIER_DTOR_P(NODE) \
(IDENTIFIER_CDTOR_P(NODE) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is for any operator name (including
conversions). Value 4, 5, 6 or 7. */
#define IDENTIFIER_ANY_OP_P(NODE) \
(IDENTIFIER_KIND_BIT_2 (NODE))
/* True if this identifier is for an overloaded operator. Values 4, 5. */
#define IDENTIFIER_OVL_OP_P(NODE) \
(IDENTIFIER_ANY_OP_P (NODE) \
& (!IDENTIFIER_KIND_BIT_1 (NODE)))
/* True if this identifier is for any assignment. Value 5. */
#define IDENTIFIER_ASSIGN_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is the name of a type-conversion
operator. Value 7. */
#define IDENTIFIER_CONV_OP_P(NODE) \
(IDENTIFIER_ANY_OP_P (NODE) \
& IDENTIFIER_KIND_BIT_1 (NODE) \
& (!IDENTIFIER_KIND_BIT_0 (NODE)))
/* True if this identifier is a new or delete operator. */
#define IDENTIFIER_NEWDEL_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
&& IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC)
/* True if this identifier is a new operator. */
#define IDENTIFIER_NEW_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
&& (IDENTIFIER_OVL_OP_FLAGS (NODE) \
& (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) == OVL_OP_FLAG_ALLOC)
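/* Editor's sketch (freestanding, invented names): the predicates above
decode the 3-bit cp_identifier_kind value stored in
IDENTIFIER_KIND_BIT_0..2. The same decoding on a plain integer, so the
bit logic can be checked in isolation: */
#include <cassert>

static bool bit0 (unsigned k) { return (k & 1) != 0; }
static bool bit1 (unsigned k) { return (k & 2) != 0; }
static bool bit2 (unsigned k) { return (k & 4) != 0; }

static bool keyword_p (unsigned k) { return !bit2 (k) && !bit1 (k) && bit0 (k); } /* 1 */
static bool cdtor_p (unsigned k) { return !bit2 (k) && bit1 (k); }                /* 2, 3 */
static bool ctor_p (unsigned k) { return cdtor_p (k) && !bit0 (k); }              /* 2 */
static bool dtor_p (unsigned k) { return cdtor_p (k) && bit0 (k); }               /* 3 */
static bool any_op_p (unsigned k) { return bit2 (k); }                            /* 4..7 */
static bool ovl_op_p (unsigned k) { return any_op_p (k) && !bit1 (k); }           /* 4, 5 */
static bool assign_op_p (unsigned k) { return ovl_op_p (k) && bit0 (k); }         /* 5 */
static bool conv_op_p (unsigned k) { return any_op_p (k) && bit1 (k) && !bit0 (k); } /* 6 */

int
main ()
{
  assert (keyword_p (1) && ctor_p (2) && dtor_p (3));
  assert (ovl_op_p (4) && assign_op_p (5) && conv_op_p (6));
  assert (!conv_op_p (7)); /* 7 is reserved for user-defined literals */
  return 0;
}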
/* Access a C++-specific index for identifier NODE.
Used to optimize operator mappings etc. */
#define IDENTIFIER_CP_INDEX(NODE) \
(IDENTIFIER_NODE_CHECK(NODE)->base.u.bits.address_space)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
struct GTY (()) tree_default_arg {
struct tree_common common;
struct cp_token_cache *tokens;
vec<tree, va_gc> *instantiations;
};
#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
&& (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT))
#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \
(DEFERRED_NOEXCEPT_SPEC_P (NODE) \
&& DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE)
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
tree pattern;
tree args;
};
/* The condition associated with the static assertion. This must be
an integral constant expression. */
#define STATIC_ASSERT_CONDITION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
/* The message associated with the static assertion. This must be a
string constant, which will be emitted as an error message when the
static assert condition is false. */
#define STATIC_ASSERT_MESSAGE(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
/* Source location information for a static assertion. */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
struct GTY (()) tree_static_assert {
struct tree_common common;
tree condition;
tree message;
location_t location;
};
struct GTY (()) tree_argument_pack_select {
struct tree_common common;
tree argument_pack;
int index;
};
/* The different kinds of traits that we encounter. */
enum cp_trait_kind
{
CPTK_BASES,
CPTK_DIRECT_BASES,
CPTK_HAS_NOTHROW_ASSIGN,
CPTK_HAS_NOTHROW_CONSTRUCTOR,
CPTK_HAS_NOTHROW_COPY,
CPTK_HAS_TRIVIAL_ASSIGN,
CPTK_HAS_TRIVIAL_CONSTRUCTOR,
CPTK_HAS_TRIVIAL_COPY,
CPTK_HAS_TRIVIAL_DESTRUCTOR,
CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS,
CPTK_HAS_VIRTUAL_DESTRUCTOR,
CPTK_IS_ABSTRACT,
CPTK_IS_AGGREGATE,
CPTK_IS_BASE_OF,
CPTK_IS_CLASS,
CPTK_IS_EMPTY,
CPTK_IS_ENUM,
CPTK_IS_FINAL,
CPTK_IS_LITERAL_TYPE,
CPTK_IS_POD,
CPTK_IS_POLYMORPHIC,
CPTK_IS_SAME_AS,
CPTK_IS_STD_LAYOUT,
CPTK_IS_TRIVIAL,
CPTK_IS_TRIVIALLY_ASSIGNABLE,
CPTK_IS_TRIVIALLY_CONSTRUCTIBLE,
CPTK_IS_TRIVIALLY_COPYABLE,
CPTK_IS_UNION,
CPTK_UNDERLYING_TYPE,
CPTK_IS_ASSIGNABLE,
CPTK_IS_CONSTRUCTIBLE
};
/* The types that we are processing. */
#define TRAIT_EXPR_TYPE1(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
#define TRAIT_EXPR_TYPE2(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
/* The specific trait that we are processing. */
#define TRAIT_EXPR_KIND(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
struct GTY (()) tree_trait_expr {
struct tree_common common;
tree type1;
tree type2;
enum cp_trait_kind kind;
};
/* Based off of TYPE_UNNAMED_P. */
#define LAMBDA_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function. */
#define LAMBDA_FUNCTION_P(FNDECL) \
(DECL_DECLARES_FUNCTION_P (FNDECL) \
&& DECL_OVERLOADED_OPERATOR_P (FNDECL) \
&& DECL_OVERLOADED_OPERATOR_IS (FNDECL, CALL_EXPR) \
&& LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
enum cp_lambda_default_capture_mode_type {
CPLD_NONE,
CPLD_COPY,
CPLD_REFERENCE
};
/* The method of default capture, if any. */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL
* so that the name, type, and field are all together, whether or not it has
* been added to the lambda's class type.
TREE_LIST:
TREE_PURPOSE: The FIELD_DECL for this capture.
TREE_VALUE: The initializer. This is part of a GNU extension. */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
/* During parsing of the lambda-introducer, the node in the capture-list
that holds the 'this' capture. During parsing of the body, the
capture proxy for that node. */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
/* Predicate tracking whether `this' is in the effective capture set. */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
/* Predicate tracking whether the lambda was declared 'mutable'. */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
/* True iff uses of a const variable capture were optimized away. */
#define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \
TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE))
/* True if this TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST is for an explicit
capture. */
#define LAMBDA_CAPTURE_EXPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* The source location of the lambda. */
#define LAMBDA_EXPR_LOCATION(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)
/* During parsing of the lambda, a vector of capture proxies which need
to be pushed once we're done processing a nested lambda. */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
/* The closure type of the lambda, which is also the type of the
LAMBDA_EXPR. */
#define LAMBDA_EXPR_CLOSURE(NODE) \
(TREE_TYPE (LAMBDA_EXPR_CHECK (NODE)))
struct GTY (()) tree_lambda_expr
{
struct tree_typed typed;
tree capture_list;
tree this_capture;
tree extra_scope;
vec<tree, va_gc> *pending_proxies;
location_t locus;
enum cp_lambda_default_capture_mode_type default_capture_mode;
int discriminator;
};
/* A (typedef,context,usage location) triplet.
It represents a typedef used through a
context at a given source location.
e.g.
struct foo {
typedef int myint;
};
struct bar {
foo::myint v; // #1<-- this location.
};
In bar, the triplet will be (myint, foo, #1).
*/
struct GTY(()) qualified_typedef_usage_s {
tree typedef_decl;
tree context;
location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
deduction. */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
(TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
/* Non-zero if this variable template specialization was specified using a
template-id, so it's a partial or full specialization and not a definition
of the member template of a particular class specialization. */
#define TINFO_USED_TEMPLATE_ID(NODE) \
(TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))
struct GTY(()) tree_template_info {
struct tree_common common;
vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
// Constraint information for a C++ declaration. Constraint information is
// comprised of:
//
// - a constraint expression introduced by the template header
// - a constraint expression introduced by a function declarator
// - the associated constraints, which are the conjunction of those,
// and used for declaration matching
//
// The template and declarator requirements are kept to support pretty
// printing constrained declarations.
struct GTY(()) tree_constraint_info {
struct tree_base base;
tree template_reqs;
tree declarator_reqs;
tree associated_constr;
};
// Require that pointer P is non-null before returning.
template<typename T>
inline T*
check_nonnull (T* p)
{
gcc_assert (p);
return p;
}
// Returns true iff T is non-null and represents constraint info.
inline tree_constraint_info *
check_constraint_info (tree t)
{
if (t && TREE_CODE (t) == CONSTRAINT_INFO)
return (tree_constraint_info *)t;
return NULL;
}
// Access the expression describing the template constraints. This may be
// null if no constraints were introduced in the template parameter list,
// in a requirements clause after the template parameter list, or through
// a constrained-type-specifier.
#define CI_TEMPLATE_REQS(NODE) \
check_constraint_info (check_nonnull(NODE))->template_reqs
// Access the expression describing the trailing constraints. This is non-null
// for any implicit instantiation of a constrained declaration. For a
// templated declaration it is non-null only when a trailing requires-clause
// was specified.
#define CI_DECLARATOR_REQS(NODE) \
check_constraint_info (check_nonnull(NODE))->declarator_reqs
// The computed associated constraint expression for a declaration.
#define CI_ASSOCIATED_CONSTRAINTS(NODE) \
check_constraint_info (check_nonnull(NODE))->associated_constr
// Access the logical constraints on the template parameters introduced
// at a given template parameter list level indicated by NODE.
#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \
TREE_TYPE (TREE_LIST_CHECK (NODE))
// Access the logical constraints on the template parameter declaration
// indicated by NODE.
#define TEMPLATE_PARM_CONSTRAINTS(NODE) \
TREE_TYPE (TREE_LIST_CHECK (NODE))
/* Non-zero if the noexcept is present in a compound requirement. */
#define COMPOUND_REQ_NOEXCEPT_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ))
/* The constraints on an 'auto' placeholder type, used in an argument deduction
constraint. */
#define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \
DECL_SIZE_UNIT (TYPE_NAME (NODE))
/* The expression evaluated by the predicate constraint. */
#define PRED_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PRED_CONSTR), 0)
/* The concept of a concept check. */
#define CHECK_CONSTR_CONCEPT(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0)
/* The template arguments of a concept check. */
#define CHECK_CONSTR_ARGS(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1)
/* The expression validated by the predicate constraint. */
#define EXPR_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, EXPR_CONSTR), 0)
/* The type validated by the predicate constraint. */
#define TYPE_CONSTR_TYPE(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, TYPE_CONSTR), 0)
/* In an implicit conversion constraint, the source expression. */
#define ICONV_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 0)
/* In an implicit conversion constraint, the target type. */
#define ICONV_CONSTR_TYPE(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 1)
/* In an argument deduction constraint, the source expression. */
#define DEDUCT_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 0)
/* In an argument deduction constraint, the target type pattern. */
#define DEDUCT_CONSTR_PATTERN(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 1)
/* In an argument deduction constraint, the list of placeholder nodes. */
#define DEDUCT_CONSTR_PLACEHOLDER(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 2)
/* The expression of an exception constraint. */
#define EXCEPT_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, EXCEPT_CONSTR), 0)
/* In a parameterized constraint, the local parameters. */
#define PARM_CONSTR_PARMS(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 0)
/* In a parameterized constraint, the operand. */
#define PARM_CONSTR_OPERAND(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 1)
/* Whether a PARM_DECL represents a local parameter in a
requires-expression. */
#define CONSTRAINT_VAR_P(NODE) \
DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL))
/* The concept constraining this constrained template-parameter. */
#define CONSTRAINED_PARM_CONCEPT(NODE) \
DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE))
/* Any extra template arguments specified for a constrained
template-parameter. */
#define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \
DECL_SIZE (TYPE_DECL_CHECK (NODE))
/* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a
prototype for the constrained parameter in finish_shorthand_constraint,
attached for convenience. */
#define CONSTRAINED_PARM_PROTOTYPE(NODE) \
DECL_INITIAL (TYPE_DECL_CHECK (NODE))
enum cp_tree_node_structure_enum {
TS_CP_GENERIC,
TS_CP_IDENTIFIER,
TS_CP_TPI,
TS_CP_PTRMEM,
TS_CP_OVERLOAD,
TS_CP_BASELINK,
TS_CP_TEMPLATE_DECL,
TS_CP_DEFAULT_ARG,
TS_CP_DEFERRED_NOEXCEPT,
TS_CP_STATIC_ASSERT,
TS_CP_ARGUMENT_PACK_SELECT,
TS_CP_TRAIT_EXPR,
TS_CP_LAMBDA_EXPR,
TS_CP_TEMPLATE_INFO,
TS_CP_CONSTRAINT_INFO,
TS_CP_USERDEF_LITERAL
};
/* The resulting tree type. */
union GTY((desc ("cp_tree_node_structure (&%h)"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
union tree_node GTY ((tag ("TS_CP_GENERIC"),
desc ("tree_node_structure (&%h)"))) generic;
struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi;
struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl;
struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
static_assertion;
struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
argument_pack_select;
struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
trait_expression;
struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
lambda_expression;
struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
template_info;
struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO")))
constraint_info;
struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
userdef_literal;
};
/* Global state. */
struct GTY(()) saved_scope {
vec<cxx_saved_binding, va_gc> *old_bindings;
tree old_namespace;
vec<tree, va_gc> *decl_ns_list;
tree class_name;
tree class_type;
tree access_specifier;
tree function_decl;
vec<tree, va_gc> *lang_base;
tree lang_name;
tree template_parms;
cp_binding_level *x_previous_class_level;
tree x_saved_tree;
/* Only used for uses of `this' in a trailing return type. */
tree x_current_class_ptr;
tree x_current_class_ref;
int x_processing_template_decl;
int x_processing_specialization;
BOOL_BITFIELD x_processing_explicit_instantiation : 1;
BOOL_BITFIELD need_pop_function_context : 1;
/* Nonzero if we are parsing the discarded statement of a constexpr
if-statement. */
BOOL_BITFIELD discarded_stmt : 1;
int unevaluated_operand;
int inhibit_evaluation_warnings;
int noexcept_operand;
/* If non-zero, implicit "omp declare target" attribute is added into the
attribute lists. */
int omp_declare_target_attribute;
struct stmt_tree_s x_stmt_tree;
cp_binding_level *class_bindings;
cp_binding_level *bindings;
hash_map<tree, tree> *GTY((skip)) x_local_specializations;
struct saved_scope *prev;
};
extern GTY(()) struct saved_scope *scope_chain;
/* The current open namespace. */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations. */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
given by the user, or, if no access specifier was given, the
default value appropriate for the kind of class (i.e., struct,
class, or union). */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack. */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name
/* When parsing a template declaration, a TREE_LIST represents the
active template parameters. Each node in the list represents one
level of template parameters. The innermost level is first in the
list. The depth of each level is stored as an INTEGER_CST in the
TREE_PURPOSE of each node. The parameters for that level are
stored in the TREE_VALUE. */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
#define in_discarded_stmt scope_chain->discarded_stmt
/* RAII sentinel to handle clearing processing_template_decl and restoring
it when done. */
struct processing_template_decl_sentinel
{
int saved;
processing_template_decl_sentinel (bool reset = true)
: saved (processing_template_decl)
{
if (reset)
processing_template_decl = 0;
}
~processing_template_decl_sentinel()
{
processing_template_decl = saved;
}
};
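/* A minimal usage sketch (illustrative only; the helper name below is
hypothetical): the sentinel clears processing_template_decl for the
duration of a scope and restores the previous value on every exit path.

     static tree
     strip_template_processing (tree t)
     {
       processing_template_decl_sentinel ptds;  // clears the flag
       // ... do non-template processing of T here ...
       return t;                                // old value restored by the dtor
     }

   Constructing the sentinel with `false' only saves and restores the flag
   without clearing it. */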
/* RAII sentinel to disable certain warnings during template substitution
and elsewhere. */
struct warning_sentinel
{
int &flag;
int val;
warning_sentinel(int& flag, bool suppress=true)
: flag(flag), val(flag) { if (suppress) flag = 0; }
~warning_sentinel() { flag = val; }
};
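/* A minimal usage sketch (hypothetical example): temporarily silence a
warning flag for the rest of the enclosing scope.

     {
       warning_sentinel w (warn_conversion);  // warn_conversion set to 0
       // ... fold or convert without conversion diagnostics ...
     }                                        // previous value restored here

   Passing `false' as the second argument saves the flag without
   suppressing it. */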
/* RAII sentinel that saves the value of a variable, optionally
overrides it right away, and restores its value when the sentinel
is destructed. */
template <typename T>
class temp_override
{
T& overridden_variable;
T saved_value;
public:
temp_override(T& var) : overridden_variable (var), saved_value (var) {}
temp_override(T& var, T overrider)
: overridden_variable (var), saved_value (var)
{
overridden_variable = overrider;
}
~temp_override() { overridden_variable = saved_value; }
};
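/* A minimal usage sketch (hypothetical example): save a global, replace it
for the duration of a scope, and let the destructor restore it.

     {
       temp_override<tree> ovr (current_function_decl, NULL_TREE);
       // ... work with current_function_decl temporarily cleared ...
     }   // original value restored here

   The one-argument constructor only saves and restores, without
   overriding. */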
/* The cached class binding level, from the most recently exited
class, or NULL if none. */
#define previous_class_level scope_chain->x_previous_class_level
/* A map from local variable declarations in the body of the template
presently being instantiated to the corresponding instantiated
local variables. */
#define local_specializations scope_chain->x_local_specializations
/* Nonzero if we are parsing the operand of a noexcept operator. */
#define cp_noexcept_operand scope_chain->noexcept_operand
/* A list of private types mentioned, for deferred access checking. */
struct GTY((for_user)) cxx_int_tree_map {
unsigned int uid;
tree to;
};
struct cxx_int_tree_map_hasher : ggc_ptr_hash<cxx_int_tree_map>
{
static hashval_t hash (cxx_int_tree_map *);
static bool equal (cxx_int_tree_map *, cxx_int_tree_map *);
};
struct named_label_entry; /* Defined in decl.c. */
struct named_label_hash : ggc_remove <named_label_entry *>
{
typedef named_label_entry *value_type;
typedef tree compare_type; /* An identifier. */
inline static hashval_t hash (value_type);
inline static bool equal (const value_type, compare_type);
inline static void mark_empty (value_type &p) {p = NULL;}
inline static bool is_empty (value_type p) {return !p;}
/* Nothing is deletable. Everything is insertable. */
inline static bool is_deleted (value_type) { return false; }
inline static void mark_deleted (value_type) { gcc_unreachable (); }
};
/* Global state pertinent to the current function. */
struct GTY(()) language_function {
struct c_language_function base;
tree x_cdtor_label;
tree x_current_class_ptr;
tree x_current_class_ref;
tree x_eh_spec_block;
tree x_in_charge_parm;
tree x_vtt_parm;
tree x_return_value;
tree x_auto_return_pattern;
BOOL_BITFIELD returns_value : 1;
BOOL_BITFIELD returns_null : 1;
BOOL_BITFIELD returns_abnormally : 1;
BOOL_BITFIELD infinite_loop: 1;
BOOL_BITFIELD x_in_function_try_handler : 1;
BOOL_BITFIELD x_in_base_initializer : 1;
/* True if this function can throw an exception. */
BOOL_BITFIELD can_throw : 1;
BOOL_BITFIELD invalid_constexpr : 1;
hash_table<named_label_hash> *x_named_labels;
cp_binding_level *bindings;
vec<tree, va_gc> *x_local_names;
/* Tracking possibly infinite loops. This is a vec<tree> only because
vec<bool> doesn't work with gengtype. */
vec<tree, va_gc> *infinite_loops;
hash_table<cxx_int_tree_map_hasher> *extern_decl_map;
};
/* The current C++-specific per-function global variables. */
#define cp_function_chain (cfun->language)
/* In a constructor or destructor, the point at which all derived-class
construction or destruction has been done; i.e., just before a
constructor returns, or before any base-class destruction is done
in a destructor. */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
PARM_DECL for the `this' pointer. The current_class_ref is an
expression for `*this'. */
#define current_class_ptr \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ptr \
: &scope_chain->x_current_class_ptr))
#define current_class_ref \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ref \
: &scope_chain->x_current_class_ref))
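/* Illustrative example (source-level view, not front-end code): given

     struct S { int i; int get () { return i; } };

   then while the body of S::get is being processed, current_class_ptr is
   the PARM_DECL for `this' and current_class_ref is an expression for
   `*this', so the implicit member access in `return i;' behaves roughly
   like `return (*this).i;'. */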
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
function, if any. */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function. Only used for
constructors and destructors. */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function. Only used for
constructors and destructors. */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
#define current_function_returns_abnormally \
cp_function_chain->returns_abnormally
/* Set to 0 at beginning of a function definition, set to 1 if we see an
obvious infinite loop. This can have false positives and false
negatives, so it should only be used as a heuristic. */
#define current_function_infinite_loop cp_function_chain->infinite_loop
/* Nonzero if we are processing a base initializer. Zero elsewhere. */
#define in_base_initializer cp_function_chain->x_in_base_initializer
#define in_function_try_handler cp_function_chain->x_in_function_try_handler
/* Expression always returned from function, or error_mark_node
otherwise, for use by the automatic named return value optimization. */
#define current_function_return_value \
(cp_function_chain->x_return_value)
/* A type involving 'auto' to be used for return type deduction. */
#define current_function_auto_return_pattern \
(cp_function_chain->x_auto_return_pattern)
/* In parser.c. */
extern tree cp_literal_operator_id (const char *);
/* TRUE if a tree code represents a statement. */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
enum languages { lang_c, lang_cplusplus };
/* Macros to make error reporting functions' lives easier. */
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
(TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes. */
#define TYPE_UNNAMED_P(NODE) \
(OVERLOAD_TYPE_P (NODE) && anon_aggrname_p (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE. */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a type that could resolve to any kind of concrete type
at instantiation time. */
#define WILDCARD_TYPE_P(T) \
(TREE_CODE (T) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (T) == TYPENAME_TYPE \
|| TREE_CODE (T) == TYPEOF_TYPE \
|| TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (T) == DECLTYPE_TYPE)
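/* For example (illustrative only), in

     template <typename T> void f (T t, typename T::type u, decltype (t) v);

   the types T, typename T::type, and decltype (t) are all wildcard types:
   a TEMPLATE_TYPE_PARM, a TYPENAME_TYPE, and a DECLTYPE_TYPE respectively,
   whose concrete meaning is only known at instantiation time. */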
/* Nonzero if T is a class (or struct or union) type. Also nonzero
for template type parameters, typename types, and instantiated
template template parameters. Keep these checks in ascending code
order. */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
union type. */
#define SET_CLASS_TYPE_P(T, VAL) \
(TYPE_LANG_FLAG_5 (RECORD_OR_UNION_CHECK (T)) = (VAL))
/* Nonzero if T is a class type. Zero for template type parameters,
typename types, and so forth. */
#define CLASS_TYPE_P(T) \
(RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class type but not a union. */
#define NON_UNION_CLASS_TYPE_P(T) \
(TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T))
/* Keep these checks in ascending code order. */
#define RECORD_OR_UNION_CODE_P(T) \
((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define OVERLOAD_TYPE_P(T) \
(CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
/* True if this type is dependent. This predicate is only valid if
TYPE_DEPENDENT_P_VALID is true. */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
result that TYPE_DEPENDENT_P is valid. */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified. */
#define CP_TYPE_CONST_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified. */
#define CP_TYPE_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified. */
#define CP_TYPE_RESTRICT_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
volatile-qualified. Other qualifiers are ignored. This macro is
used to test whether or not it is OK to bind an rvalue to a
reference. */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
== TYPE_QUAL_CONST)
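/* Worked example (illustrative): for `const int', cp_type_quals returns
TYPE_QUAL_CONST, so masking with (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)
yields TYPE_QUAL_CONST and the macro is true. For `const volatile int'
the mask keeps both bits, which is not equal to TYPE_QUAL_CONST, so the
macro is false. */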
#define FUNCTION_ARG_CHAIN(NODE) \
TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
which refers to a user-written parameter. */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS. */
#define FUNCTION_FIRST_USER_PARM(NODE) \
skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
ambiguity issues. */
#define DERIVED_FROM_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)
/* Gives the visibility specification for a class type. */
#define CLASSTYPE_VISIBILITY(TYPE) \
DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
struct GTY (()) tree_pair_s {
tree purpose;
tree value;
};
typedef tree_pair_s *tree_pair_p;
/* This structure provides additional information above and beyond
what is provided in the ordinary tree_type. In the past, we used it
for the types of class types, template parameter types, typename
types, and so forth. However, there can be many (tens to hundreds
of thousands) of template parameter types in a compilation, and
there's no need for this additional information in that case.
Therefore, we now use this data structure only for class types.
In the past, it was thought that there would be relatively few
class types. However, in the presence of heavy use of templates,
many (i.e., thousands) of classes can easily be generated.
Therefore, we should endeavor to keep the size of this structure to
a minimum. */
struct GTY(()) lang_type {
unsigned char align;
unsigned has_type_conversion : 1;
unsigned has_copy_ctor : 1;
unsigned has_default_ctor : 1;
unsigned const_needs_init : 1;
unsigned ref_needs_init : 1;
unsigned has_const_copy_assign : 1;
unsigned use_template : 2;
unsigned has_mutable : 1;
unsigned com_interface : 1;
unsigned non_pod_class : 1;
unsigned nearly_empty_p : 1;
unsigned user_align : 1;
unsigned has_copy_assign : 1;
unsigned has_new : 1;
unsigned has_array_new : 1;
unsigned gets_delete : 2;
unsigned interface_only : 1;
unsigned interface_unknown : 1;
unsigned contains_empty_class_p : 1;
unsigned anon_aggr : 1;
unsigned non_zero_init : 1;
unsigned empty_p : 1;
/* 32 bits allocated. */
unsigned vec_new_uses_cookie : 1;
unsigned declared_class : 1;
unsigned diamond_shaped : 1;
unsigned repeated_base : 1;
unsigned being_defined : 1;
unsigned debug_requested : 1;
unsigned fields_readonly : 1;
unsigned ptrmemfunc_flag : 1;
unsigned was_anonymous : 1;
unsigned lazy_default_ctor : 1;
unsigned lazy_copy_ctor : 1;
unsigned lazy_copy_assign : 1;
unsigned lazy_destructor : 1;
unsigned has_const_copy_ctor : 1;
unsigned has_complex_copy_ctor : 1;
unsigned has_complex_copy_assign : 1;
unsigned non_aggregate : 1;
unsigned has_complex_dflt : 1;
unsigned has_list_ctor : 1;
unsigned non_std_layout : 1;
unsigned is_literal : 1;
unsigned lazy_move_ctor : 1;
unsigned lazy_move_assign : 1;
unsigned has_complex_move_ctor : 1;
unsigned has_complex_move_assign : 1;
unsigned has_constexpr_ctor : 1;
unsigned unique_obj_representations : 1;
unsigned unique_obj_representations_set : 1;
/* When adding a flag here, consider whether or not it ought to
apply to a template instance if it applies to the template. If
so, make sure to copy it in instantiate_class_template! */
/* There are some bits left to fill out a 32-bit word. Keep track
of this by updating the size of this bitfield whenever you add or
remove a flag. */
unsigned dummy : 4;
tree primary_base;
vec<tree_pair_s, va_gc> *vcall_indices;
tree vtables;
tree typeinfo_var;
vec<tree, va_gc> *vbases;
binding_table nested_udts;
tree as_base;
vec<tree, va_gc> *pure_virtuals;
tree friend_classes;
vec<tree, va_gc> * GTY((reorder ("resort_type_member_vec"))) members;
tree key_method;
tree decl_list;
tree befriending_classes;
/* In a RECORD_TYPE, information specific to Objective-C++, such
as a list of adopted protocols or a pointer to a corresponding
@interface. See objc/objc-act.h for details. */
tree objc_info;
/* FIXME reuse another field? */
tree lambda_expr;
};
/* We used to have a variant type for lang_type. Keep the name of the
checking accessor for the sole survivor. */
#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE))
/* Nonzero for _CLASSTYPE means that operator delete is defined. */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
(CLASS_TYPE_P (NODE) \
&& LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has a move constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
/* Nonzero means that NODE (a class type) has a destructor -- but that
it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that NODE (a class type) is final. */
#define CLASSTYPE_FINAL(NODE) \
TYPE_FINAL_P (NODE)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
/* True iff the class type NODE has an "operator =" whose parameter
is of type "const X&". */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_assign)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
/* Nonzero if this class has an X(initializer_list<T>) constructor. */
#define TYPE_HAS_LIST_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
/* Nonzero if this class has a constexpr constructor other than a copy/move
constructor. Note that a class can have constexpr constructors for
static initialization even if it isn't a literal class. */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
/* Nonzero if this class defines an overloaded operator new. (An
operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Nonzero means that this type is either complete or being defined, so we
can do lookup in it. */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
(COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
the first noninline non-pure-virtual member function. NULL_TREE
if there is no key function or if this is a class template. */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector of members. During definition, it is unordered and only
member functions are present. After completion it is sorted and
contains both member functions and non-functions. STAT_HACK is
involved to preserve the one-slot-per-name invariant. */
#define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members)
/* For class templates, this is a TREE_LIST of all member data,
functions, types, and friends in the order of declaration.
The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
(get_class_binding_direct (NODE, ctor_identifier))
/* A FUNCTION_DECL for the destructor for NODE. This is the
destructor that takes an in-charge parameter. If
CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTOR(NODE) \
(get_class_binding_direct (NODE, dtor_identifier))
/* A dictionary of the nested user-defined-types (class-types, or enums)
found within this class. This table includes nested member class
templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
(CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
the base class which contains the virtual function table pointer
for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
that this type uses in a post-order depth-first left-to-right
order. (In other words, these bases appear in the order that they
should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
i.e., NODE without virtual base classes or tail padding. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
&& CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type is an abstract class type. */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is not POD for the purpose of layout
(as defined in the ABI). This is different from the language's POD. */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class type is a non-standard-layout class. */
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
/* Nonzero means that this class type does have unique object
representations. */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations)
/* Nonzero means that this class type has
CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed. */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* The associated LAMBDA_EXPR that made this class. */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type. */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
(LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.) */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
is a nearly-empty virtual base that has been taken by some other
base in the complete hierarchy). */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* Whether or not this entry is for a lost primary virtual base. */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. For a noexcept specification, TREE_VALUE
is NULL_TREE and TREE_PURPOSE is the constant-expression. For
a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
(for templates) or an OVERLOAD list of functions (for implicitly
declared functions). */
#define TYPE_RAISES_EXCEPTIONS(NODE) \
TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
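/* Rough illustration of the encoding described above (a sketch, not a
normative list):

     void f () throw (int, char); // TREE_LIST whose TREE_VALUEs are int, char
     void g () throw ();          // a single node with TREE_VALUE == NULL_TREE
     void h () noexcept;          // TREE_VALUE NULL_TREE, TREE_PURPOSE the
                                  // constant-expression (here, true)
     void i ();                   // no exception specification: NULL_TREE

   See nothrow_spec_p and type_noexcept_p, used just below, for the
   predicates built on this representation. */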
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
or noexcept(true). */
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
case for things declared noexcept(true) and, with -fnothrow-opt, for
throw() functions. */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
/* The binding level associated with the namespace. */
#define NAMESPACE_LEVEL(NODE) \
(LANG_DECL_NS_CHECK (NODE)->level)
/* Discriminator values for lang_decl. */
enum lang_decl_selector
{
lds_min,
lds_fn,
lds_ns,
lds_parm,
lds_decomp
};
/* Flags shared by all forms of DECL_LANG_SPECIFIC.
Some of the flags live here only to make lang_decl_min/fn smaller. Do
not make this struct larger than 32 bits; instead, make sel smaller. */
struct GTY(()) lang_decl_base {
/* Larger than necessary for faster access. */
ENUM_BITFIELD(lang_decl_selector) selector : 16;
ENUM_BITFIELD(languages) language : 1;
unsigned use_template : 2;
unsigned not_really_extern : 1; /* var or fn */
unsigned initialized_in_class : 1; /* var or fn */
unsigned repo_available_p : 1; /* var or fn */
unsigned threadprivate_or_deleted_p : 1; /* var or fn */
unsigned anticipated_p : 1; /* fn, type or template */
/* anticipated_p reused as DECL_OMP_PRIVATIZED_MEMBER in var */
unsigned friend_or_tls : 1; /* var, fn, type or template */
unsigned unknown_bound_p : 1; /* var */
unsigned odr_used : 1; /* var or fn */
unsigned u2sel : 1;
unsigned concept_p : 1; /* applies to vars and functions */
unsigned var_declared_inline_p : 1; /* var */
unsigned dependent_init_p : 1; /* var */
/* 1 spare bit */
};
/* True for DECL codes which have template info and access. */
#define LANG_DECL_HAS_MIN(NODE) \
(VAR_OR_FUNCTION_DECL_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL \
|| TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == USING_DECL)
/* DECL_LANG_SPECIFIC for the above codes. */
struct GTY(()) lang_decl_min {
struct lang_decl_base base;
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_ALIAS.
In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
DECL_TEMPLATE_INFO. */
tree template_info;
union lang_decl_u2 {
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_VIRTUAL_OFFSET.
In a VAR_DECL for which DECL_HAS_VALUE_EXPR_P holds,
this is DECL_CAPTURED_VARIABLE.
Otherwise this is DECL_ACCESS. */
tree GTY ((tag ("0"))) access;
/* For TREE_STATIC VAR_DECL in function, this is DECL_DISCRIMINATOR. */
int GTY ((tag ("1"))) discriminator;
} GTY ((desc ("%0.u.base.u2sel"))) u2;
};
/* Additional DECL_LANG_SPECIFIC information for functions. */
struct GTY(()) lang_decl_fn {
struct lang_decl_min min;
/* In an overloaded operator, this is the compressed operator code. */
unsigned ovl_op_code : 6;
unsigned global_ctor_p : 1;
unsigned global_dtor_p : 1;
unsigned static_function : 1;
unsigned pure_virtual : 1;
unsigned defaulted_p : 1;
unsigned has_in_charge_parm_p : 1;
unsigned has_vtt_parm_p : 1;
unsigned pending_inline_p : 1;
unsigned nonconverting : 1;
unsigned thunk_p : 1;
unsigned this_thunk_p : 1;
unsigned hidden_friend_p : 1;
unsigned omp_declare_reduction_p : 1;
unsigned spare : 13;
/* 32 bits of padding on a 64-bit host. */
/* For a non-thunk function decl, this is a tree list of
friendly classes. For a thunk function decl, it is the
thunked to function decl. */
tree befriending_classes;
/* For a non-virtual FUNCTION_DECL, this is
DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which
DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
this pointer and result pointer adjusting thunks are
chained here. This pointer thunks to return pointer thunks
will be chained on the return pointer thunk. */
tree context;
union lang_decl_u5
{
/* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
DECL_CLONED_FUNCTION. */
tree GTY ((tag ("0"))) cloned_function;
/* In a FUNCTION_DECL for which THUNK_P holds this is the
THUNK_FIXED_OFFSET. */
HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
} GTY ((desc ("%1.thunk_p"))) u5;
union lang_decl_u3
{
struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
struct language_function * GTY ((tag ("0")))
saved_language_function;
} GTY ((desc ("%1.pending_inline_p"))) u;
};
/* DECL_LANG_SPECIFIC for namespaces. */
struct GTY(()) lang_decl_ns {
struct lang_decl_base base;
cp_binding_level *level;
/* using directives and inline children. These need to be va_gc,
because of PCH. */
vec<tree, va_gc> *usings;
vec<tree, va_gc> *inlinees;
/* Hash table of bound decls. It'd be nice to have this inline, but
as the hash_map has a dtor, we can't then put this struct into a
union (until moving to c++11). */
hash_table<named_decl_hash> *bindings;
};
/* DECL_LANG_SPECIFIC for parameters. */
struct GTY(()) lang_decl_parm {
struct lang_decl_base base;
int level;
int index;
};
/* Additional DECL_LANG_SPECIFIC information for structured bindings. */
struct GTY(()) lang_decl_decomp {
struct lang_decl_min min;
/* The artificial underlying "e" variable of the structured binding
variable. */
tree base;
};
/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a
union rather than a struct containing a union as its only field, but
tree.h declares it as a struct. */
struct GTY(()) lang_decl {
union GTY((desc ("%h.base.selector"))) lang_decl_u {
/* Nothing of only the base type exists. */
struct lang_decl_base GTY ((default)) base;
struct lang_decl_min GTY((tag ("lds_min"))) min;
struct lang_decl_fn GTY ((tag ("lds_fn"))) fn;
struct lang_decl_ns GTY((tag ("lds_ns"))) ns;
struct lang_decl_parm GTY((tag ("lds_parm"))) parm;
struct lang_decl_decomp GTY((tag ("lds_decomp"))) decomp;
} u;
};
/* Looks through a template (if present) to find what it declares. */
#define STRIP_TEMPLATE(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_DECL_MIN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE)) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min; })
/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
template, not just on a FUNCTION_DECL. So when looking for things in
lang_decl_fn, look down through a TEMPLATE_DECL into its result. */
#define LANG_DECL_FN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \
if (!DECL_DECLARES_FUNCTION_P (NODE) \
|| lt->u.base.selector != lds_fn) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.fn; })
#define LANG_DECL_NS_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != NAMESPACE_DECL \
|| lt->u.base.selector != lds_ns) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ns; })
#define LANG_DECL_PARM_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != PARM_DECL \
|| lt->u.base.selector != lds_parm) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.parm; })
#define LANG_DECL_DECOMP_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!VAR_P (NODE) \
|| lt->u.base.selector != lds_decomp) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.decomp; })
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min.u2; })
#else
#define LANG_DECL_MIN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.min)
#define LANG_DECL_FN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)
#define LANG_DECL_NS_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.ns)
#define LANG_DECL_PARM_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.parm)
#define LANG_DECL_DECOMP_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.decomp)
#define LANG_DECL_U2_CHECK(NODE, TF) \
(&DECL_LANG_SPECIFIC (NODE)->u.min.u2)
#endif /* ENABLE_TREE_CHECKING */
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
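/* Illustrative example (a sketch; the exact bookkeeping is elided): for

     extern "C" void c_fn ();
     void cxx_fn ();

   DECL_LANGUAGE (c_fn) is lang_c while DECL_LANGUAGE (cxx_fn) is
   lang_cplusplus, the former having been recorded with SET_DECL_LANGUAGE
   when the linkage-specification was processed. */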
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
IDENTIFIER_CTOR_P (DECL_NAME (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
IDENTIFIER_DTOR_P (DECL_NAME (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
specialized in-charge constructor, in-charge deleting constructor,
or the base destructor. */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == deleting_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
*/
#define FOR_EACH_CLONE(CLONE, FN) \
if (!(TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))))\
; \
else \
for (CLONE = DECL_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = DECL_CHAIN (CLONE))
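/* A hypothetical use (illustrative sketch): propagate a flag from a
maybe-in-charge constructor or destructor to each of its clones.

     tree clone;
     FOR_EACH_CLONE (clone, fn)
       TREE_USED (clone) = TREE_USED (fn);

   If FN is not a maybe-in-charge constructor or destructor, the body is
   never executed. */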
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)
/* The index of a user-declared parameter in its function, starting at 1.
All artificial parameters will have index 0. */
#define DECL_PARM_INDEX(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->index)
/* The level of a user-declared parameter in its function, starting at 1.
A parameter of the function will have level 1; a parameter of the first
nested function declarator (i.e. t in void f (void (*p)(T t))) will have
level 2. */
#define DECL_PARM_LEVEL(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->level)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE))
/* The type to which conversion operator FN converts. */
#define DECL_CONV_FN_TYPE(FN) \
TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN)))
/* Nonzero if NODE, a static data member, was declared in its class as an
array of unknown bound. */
#define VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.unknown_bound_p \
: false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.unknown_bound_p = true)
/* True iff decl NODE is for an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
IDENTIFIER_ANY_OP_P (DECL_NAME (NODE))
/* Nonzero if NODE is an assignment operator (including += and such). */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
IDENTIFIER_ASSIGN_OP_P (DECL_NAME (NODE))
/* NODE is a function_decl for an overloaded operator. Return its
compressed (raw) operator code. Note that this is not a TREE_CODE. */
#define DECL_OVERLOADED_OPERATOR_CODE_RAW(NODE) \
(LANG_DECL_FN_CHECK (NODE)->ovl_op_code)
/* DECL is an overloaded operator. Test whether it is for TREE_CODE
(a literal constant). */
#define DECL_OVERLOADED_OPERATOR_IS(DECL, CODE) \
(DECL_OVERLOADED_OPERATOR_CODE_RAW (DECL) == OVL_OP_##CODE)
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears in (or will appear
in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided
or a non-trivial constructor is called. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.initialized_in_class)
/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
Only available for decls with DECL_LANG_SPECIFIC. */
#define DECL_ODR_USED(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.odr_used)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.friend_or_tls)
/* Nonzero if the thread-local variable was declared with __thread as
opposed to thread_local. */
#define DECL_GNU_TLS_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
&& DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls)
#define SET_DECL_GNU_TLS_P(NODE) \
(retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \
DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor or conversion function is
non-converting. */
#define DECL_NONCONVERTING_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
an override virt-specifier */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set DECL_THUNKS. */
#define SET_DECL_THUNKS(NODE,THUNKS) \
(LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
is the constructor it inherits from. */
#define DECL_INHERITED_CTOR(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* And this is the base that constructor comes from. */
#define DECL_INHERITED_CTOR_BASE(NODE) \
(DECL_INHERITED_CTOR (NODE) \
? DECL_CONTEXT (flag_new_inheriting_ctors \
? strip_inheriting_ctors (NODE) \
: DECL_INHERITED_CTOR (NODE)) \
: NULL_TREE)
/* Set the inherited base. */
#define SET_DECL_INHERITED_CTOR(NODE,INH) \
(LANG_DECL_FN_CHECK (NODE)->context = (INH))
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& LANG_DECL_FN_CHECK (NODE)->thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)
/* True if DECL is declared 'constexpr'. */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
// True if NODE was declared as 'concept'. The flag implies that the
// declaration is constexpr, that the declaration cannot be specialized or
// refined, and that the result type must be convertible to bool.
#define DECL_DECLARED_CONCEPT_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.concept_p)
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(DECL_NAME (NODE) \
&& id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__"))
/* Nonzero if the variable was declared to be thread-local.
We need a special C++ version of this test because the middle-end
DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for
templates. */
#define CP_DECL_THREAD_LOCAL_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f () { ... } };
the DECL_FRIEND_CONTEXT for `f' will be `S'. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_DECLARES_FUNCTION_P (NODE) \
&& DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
#define CP_DECL_CONTEXT(NODE) \
(!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) \
((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
#define NAMESPACE_SCOPE_P(NODE) \
((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
|| (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
#define FUNCTION_REF_QUALIFIED(NODE) \
TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* The nesting depth of namespace, class or function. Makes is_ancestor much
simpler. Only 8 bits available. */
#define SCOPE_DEPTH(NODE) \
(NAMESPACE_DECL_CHECK (NODE)->base.u.bits.address_space)
/* Whether the namespace is an inline namespace. */
#define DECL_NAMESPACE_INLINE_P(NODE) \
TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE))
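/* Illustrative sketch (hypothetical user code, not from these sources):
     namespace lib { inline namespace v2 { void f (); } }
   Here DECL_NAMESPACE_INLINE_P would be expected to hold for the
   NAMESPACE_DECL of `v2', which should also appear in the
   DECL_NAMESPACE_INLINEES vector of `lib' (see below). */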
/* In a NAMESPACE_DECL, a vector of using directives. */
#define DECL_NAMESPACE_USING(NODE) \
(LANG_DECL_NS_CHECK (NODE)->usings)
/* In a NAMESPACE_DECL, a vector of inline namespaces. */
#define DECL_NAMESPACE_INLINEES(NODE) \
(LANG_DECL_NS_CHECK (NODE)->inlinees)
/* Pointer to hash_map from IDENTIFIERS to DECLS */
#define DECL_NAMESPACE_BINDINGS(NODE) \
(LANG_DECL_NS_CHECK (NODE)->bindings)
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
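/* Illustrative sketch (hypothetical names): given
     namespace very_long_name { }
     namespace vl = very_long_name;
   the NAMESPACE_DECL for `vl' would have DECL_NAMESPACE_ALIAS pointing at
   `very_long_name', so ORIGINAL_NAMESPACE (vl) yields `very_long_name',
   while ORIGINAL_NAMESPACE (very_long_name) yields the namespace itself. */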
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST in an attribute list, indicates that the attribute
must be applied at instantiation time. */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
was inherited from a template parameter, not explicitly indicated. */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Nonzero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* Nonzero if the using decl refers to a dependent type. */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */
#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a type which is an alias for another type; i.e., a type
   whose declaration was written 'using name-of-type =
another-type'. */
#define TYPE_ALIAS_P(NODE) \
(TYPE_P (NODE) \
&& TYPE_NAME (NODE) \
&& TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
&& TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
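/* Illustrative sketch (hypothetical user code): after
     using index_t = long;
   the TYPE_DECL for `index_t' should have TYPE_DECL_ALIAS_P set, and
   TYPE_ALIAS_P should hold for the variant of `long' whose TYPE_NAME is
   that TYPE_DECL; a plain `typedef long index_t;' would set neither. */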
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
->u.min.template_info)
/* For a lambda capture proxy, its captured variable. */
#define DECL_CAPTURED_VARIABLE(NODE) \
(LANG_DECL_U2_CHECK (NODE, 0)->access)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (RECORD_OR_UNION_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)))
/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
BOUND_TEMPLATE_TEMPLATE_PARM type. This ignores any alias
templateness of NODE. It'd be nice if this could unconditionally
access the slot, rather than return NULL if given a
non-templatable type. */
#define TYPE_TEMPLATE_INFO(NODE) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
|| TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| RECORD_OR_UNION_TYPE_P (NODE) \
? TYPE_LANG_SLOT_1 (NODE) : NULL_TREE)
/* Template information (if any) for an alias type. */
#define TYPE_ALIAS_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
: NULL_TREE)
/* If NODE is a type alias, this accessor returns the template info
for the alias template (if any). Otherwise behave as
TYPE_TEMPLATE_INFO. */
#define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \
(TYPE_ALIAS_P (NODE) \
? TYPE_ALIAS_TEMPLATE_INFO (NODE) \
: TYPE_TEMPLATE_INFO (NODE))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
|| (CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
? (TYPE_LANG_SLOT_1 (NODE) = (VAL)) \
: (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL)))
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* For a given TREE_VEC containing a template argument list,
this property contains the number of arguments that are not
defaulted. */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))
/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
property. */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
#if CHECKING_P
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
: TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif
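/* Illustrative sketch (hypothetical user code): for
     template <class T, class U = int> struct P { };
     P<char> p;
   the innermost argument vector of P<char> is {char, int}, but only one
   argument was written explicitly, so GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT
   on that vector would be expected to return 1. */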
/* The list of typedefs - used in the template - that need
access checking at template instantiation time.
FIXME this should be associated with the TEMPLATE_DECL, not the
TEMPLATE_INFO. */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
((struct tree_template_info*)TEMPLATE_INFO_CHECK \
(NODE))->typedefs_needing_access_checking
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level.
For each TREE_VEC containing the template arguments for a single
level, it's possible to get or set the number of non-defaulted
template arguments by using the accessor macros
GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
/* Nonzero if the template argument list is actually a vector of vectors,
rather than just a vector. */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
(NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
&& TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
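/* Illustrative sketch (hypothetical user code): for
     template <class T> struct Outer { template <class U> void f (U); };
   the arguments for the specialization Outer<int>::f<double> form two
   levels, {{int}, {double}}: TMPL_ARGS_DEPTH is 2, TMPL_ARGS_LEVEL (args, 1)
   is {int}, and TMPL_ARGS_LEVEL (args, 2) is {double}. */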
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}.
For a FIELD_DECL with a non-static data member initializer, this value
is the FIELD_DECL it was instantiated from. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
will be generated from a partial specialization, the TEMPLATE_DECL
referred to here will be the original template. For example,
given:
template <typename T> struct S {};
template <typename T> struct S<T*> {};
the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>. */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */
#define DECL_PACK_P(NODE) \
(DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))
/* Determines if NODE is an expansion of one or more parameter packs,
e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_P(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
|| TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_PATTERN(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION ? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
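/* Illustrative sketch (hypothetical user code): in
     template <class... Us> void h (Us...);
     template <class... Ts> void g (Ts... ts) { h ((ts + 1)...); }
   the expansion `(ts + 1)...' is represented by an EXPR_PACK_EXPANSION
   whose PACK_EXPANSION_PATTERN is `ts + 1' and whose parameter-pack list
   (PACK_EXPANSION_PARAMETER_PACKS, below) names the pack `ts'. */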
/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* The list of parameter packs used in the PACK_EXPANSION_* node. The
TREE_VALUE of each TREE_LIST contains the parameter packs. */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
*(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
? &TREE_OPERAND (NODE, 1) \
: &TYPE_MIN_VALUE_RAW (TYPE_PACK_EXPANSION_CHECK (NODE)))
/* Any additional template args to be applied when substituting into
the pattern, set by tsubst_pack_expansion for partial instantiations.
If this is a TREE_LIST, the TREE_VALUE of the first element is the
usual template argument TREE_VEC, and the TREE_PURPOSE of later elements
are enclosing functions that provided function parameter packs we'll need
to map appropriately. */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
*(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
? &TYPE_MAX_VALUE_RAW (NODE) \
: &TREE_OPERAND ((NODE), 2))
/* True iff this pack expansion is within a function context. */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* True iff this pack expansion is for sizeof.... */
#define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* True iff the wildcard can match a template parameter pack. */
#define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* Determine if this is an argument pack. */
#define ARGUMENT_PACK_P(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
|| TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
/* The arguments stored in an argument pack. Arguments are stored in a
TREE_VEC, which may have length zero. */
#define ARGUMENT_PACK_ARGS(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK ? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Set the arguments stored in an argument pack. VALUE must be a
TREE_VEC. */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* Whether the argument pack is "incomplete", meaning that more
arguments can still be deduced. Incomplete argument packs are only
used when the user has provided an explicit template argument list
for a variadic function template. Some of the explicit template
arguments will be placed into the beginning of the argument pack,
but additional arguments might still be deduced. */
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
arguments used to fill this pack. */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
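/* Illustrative sketch (hypothetical user code): given
     template <class... Ts> void f (Ts...);
   a call written as f<int> (1, 2.0) seeds the pack with the explicit
   `<int>', so its argument pack starts out incomplete
   (ARGUMENT_PACK_INCOMPLETE_P) with {int} recorded as
   ARGUMENT_PACK_EXPLICIT_ARGS, while `double' is still deduced from the
   second call argument. */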
/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
argument will be selected. */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
select. */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
#define FOLD_EXPR_CHECK(NODE) \
TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \
BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
#define BINARY_FOLD_EXPR_CHECK(NODE) \
TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
/* True if NODE is a UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR. */
#define FOLD_EXPR_P(NODE) \
(TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \
|| TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \
|| TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \
|| TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR)
/* True when NODE is a fold over a compound assignment operator. */
#define FOLD_EXPR_MODIFY_P(NODE) \
TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE))
/* An INTEGER_CST containing the tree code of the folded operator. */
#define FOLD_EXPR_OP(NODE) \
TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0)
/* The expression containing an unexpanded parameter pack. */
#define FOLD_EXPR_PACK(NODE) \
TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1)
/* In a binary fold expression, the argument with no unexpanded
parameter packs. */
#define FOLD_EXPR_INIT(NODE) \
TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2)
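/* Illustrative sketch (hypothetical user code): in
     template <class... Ts> auto sum (Ts... ts) { return (ts + ... + 0); }
   `(ts + ... + 0)' is a binary right fold: FOLD_EXPR_OP holds PLUS_EXPR as
   an INTEGER_CST, FOLD_EXPR_PACK is the operand containing the pack `ts',
   and FOLD_EXPR_INIT is `0'. */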
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
->u.saved_language_function)
/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */
#define REFERENCE_REF_P(NODE) \
(INDIRECT_REF_P (NODE) \
&& TREE_TYPE (TREE_OPERAND (NODE, 0)) \
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
== REFERENCE_TYPE))
/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
reference to VLA type, because it's used for VLA capture. */
#define REFERENCE_VLA_OK(NODE) \
(TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))
#define NEW_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_VEC(NODE) \
TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
#define CALL_OR_AGGR_INIT_CHECK(NODE) \
TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR)
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* True if the arguments to NODE should be evaluated in left-to-right
order regardless of PUSH_ARGS_REVERSED. */
#define CALL_EXPR_ORDERED_ARGS(NODE) \
TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* True if the arguments to NODE should be evaluated in right-to-left
order regardless of PUSH_ARGS_REVERSED. */
#define CALL_EXPR_REVERSE_ARGS(NODE) \
TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* True if CALL_EXPR was written as an operator expression, not a function
call. */
#define CALL_EXPR_OPERATOR_SYNTAX(NODE) \
TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, or
an INDIRECT_REF comes from parenthesizing a _DECL. Currently only set some
of the time in C++14 mode. */
#define REF_PARENTHESIZED_P(NODE) \
TREE_LANG_FLAG_2 (TREE_CHECK3 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF))
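/* Illustrative sketch (hypothetical user code):
     struct A { int m; decltype(auto) get () { return (m); } };
   The parenthesized member access `(m)' would be expected to carry
   REF_PARENTHESIZED_P, which is what makes decltype(auto) deduce `int&'
   here rather than `int'. */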
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
the object. */
#define AGGR_INIT_ZERO_FIRST(NODE) \
TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero means that the call is the jump from a thunk to the
thunked-to function. */
#define AGGR_INIT_FROM_THUNK_P(NODE) \
(AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag)
/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
CALL_EXPR_STATIC_CHAIN). */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
the argument count is zero when checking is enabled. Instead, do
the pointer arithmetic to advance past the 3 fixed operands in an
AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
the operand array, even if it's not valid to dereference it. */
#define AGGR_INIT_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
/* Abstract iterators for AGGR_INIT_EXPRs. */
/* Structure containing iterator state. */
struct aggr_init_expr_arg_iterator {
tree t; /* the aggr_init_expr */
int n; /* argument count */
int i; /* next argument index */
};
/* Initialize the abstract argument list iterator object ITER with the
arguments from AGGR_INIT_EXPR node EXP. */
inline void
init_aggr_init_expr_arg_iterator (tree exp,
aggr_init_expr_arg_iterator *iter)
{
iter->t = exp;
iter->n = aggr_init_expr_nargs (exp);
iter->i = 0;
}
/* Return the next argument from abstract argument list iterator object ITER,
and advance its state. Return NULL_TREE if there are no more arguments. */
inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
tree result;
if (iter->i >= iter->n)
return NULL_TREE;
result = AGGR_INIT_EXPR_ARG (iter->t, iter->i);
iter->i++;
return result;
}
/* Initialize the abstract argument list iterator object ITER, then advance
past and return the first argument. Useful in for expressions, e.g.
for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
arg = next_aggr_init_expr_arg (&iter)) */
inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
init_aggr_init_expr_arg_iterator (exp, iter);
return next_aggr_init_expr_arg (iter);
}
/* Test whether there are more arguments in abstract argument list iterator
ITER, without changing its state. */
inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
return (iter->i < iter->n);
}
/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
(arg) = next_aggr_init_expr_arg (&(iter)))
/* VEC_INIT_EXPR accessors. */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
Only set when the current function is constexpr. */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
exceptions. NULL_TREE means 'true'. */
#define MUST_NOT_THROW_COND(NODE) \
TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) \
(TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE is in the process of being resolved. */
#define TYPENAME_IS_RESOLVING_P(NODE) \
(TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'break' stmts. */
#define LABEL_DECL_BREAK(NODE) \
DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'continue' stmts. */
#define LABEL_DECL_CONTINUE(NODE) \
DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'return' stmts
in constructors/destructors of targetm.cxx.cdtor_returns_this targets. */
#define LABEL_DECL_CDTOR(NODE) \
DECL_LANG_FLAG_2 (LABEL_DECL_CHECK (NODE))
/* True if NODE was declared with auto in its return type, but it has
started compilation and so the return type might have been changed by
return type deduction; its declared return type should be found in
DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */
#define FNDECL_USED_AUTO(NODE) \
TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.anticipated_p)
/* Is DECL NODE a hidden name? */
#define DECL_HIDDEN_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && TYPE_FUNCTION_OR_TEMPLATE_DECL_P (NODE) \
&& DECL_ANTICIPATED (NODE))
/* True if this is a hidden class type. */
#define TYPE_HIDDEN_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
&& DECL_ANTICIPATED (TYPE_NAME (NODE)))
/* True for artificial decls added for OpenMP privatized non-static
data members. */
#define DECL_OMP_PRIVATIZED_MEMBER(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)
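/* Illustrative sketch (hypothetical user code):
     struct S { friend void f (S) { } };
   `f' is only found by argument-dependent lookup -- f (S ()) compiles while
   a qualified ::f does not -- so its FUNCTION_DECL would be expected to
   have DECL_HIDDEN_FRIEND_P (and DECL_ANTICIPATED) set. */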
/* Nonzero if NODE is an artificial FUNCTION_DECL for
#pragma omp declare reduction. */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if NODE is a VAR_DECL which has been declared inline. */
#define DECL_VAR_DECLARED_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p \
: false)
#define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \
= true)
/* True if NODE is a constant variable with a value-dependent initializer. */
#define DECL_DEPENDENT_INIT_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
&& DECL_LANG_SPECIFIC (NODE)->u.base.dependent_init_p)
#define SET_DECL_DEPENDENT_INIT_P(NODE, X) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.dependent_init_p = (X))
/* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding
declaration or one of VAR_DECLs for the user identifiers in it. */
#define DECL_DECOMPOSITION_P(NODE) \
(VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp \
: false)
/* The underlying artificial VAR_DECL for structured binding. */
#define DECL_DECOMP_BASE(NODE) \
(LANG_DECL_DECOMP_CHECK (NODE)->base)
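/* Illustrative sketch (hypothetical user code): for a block-scope
   declaration such as
     int arr[2] = { 1, 2 };
     auto [a, b] = arr;
   an artificial VAR_DECL holds the copied array, and the VAR_DECLs for `a'
   and `b' satisfy DECL_DECOMPOSITION_P with DECL_DECOMP_BASE pointing at
   that artificial variable. */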
/* Nonzero if NODE is an inline VAR_DECL. In C++17, static data members
declared with constexpr specifier are implicitly inline variables. */
#define DECL_INLINE_VAR_P(NODE) \
(DECL_VAR_DECLARED_INLINE_P (NODE) \
|| (cxx_dialect >= cxx17 \
&& DECL_DECLARED_CONSTEXPR_P (NODE) \
&& DECL_CLASS_SCOPE_P (NODE)))
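/* Illustrative sketch (hypothetical user code): in C++17,
     struct S { static constexpr int n = 42; };
   makes `n' an implicitly inline variable, so DECL_INLINE_VAR_P would be
   expected to hold for it even though `inline' was never written. */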
/* Nonzero if DECL was declared with '= delete'. */
#define DECL_DELETED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
#define DECL_DEFAULTED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->defaulted_p)
/* Nonzero if DECL is explicitly defaulted in the class body. */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
/* Nonzero if DECL was defaulted outside the class body. */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) \
&& !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
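/* Illustrative sketch (hypothetical user code):
     struct S { S (); };
     S::S () = default;
   Here DECL_DEFAULTED_FN holds for S::S, DECL_DEFAULTED_IN_CLASS_P does
   not (the definition was not defaulted inside the class body), and
   DECL_DEFAULTED_OUTSIDE_CLASS_P does. */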
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration type.  Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* Returns true if TYPE is an integral or unscoped enumeration type. */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
(UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
/* True if the class type TYPE is a literal type. */
#define CLASSTYPE_LITERAL_P(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* True iff TYPE is cv decltype(nullptr). */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types,
pointer-to-member types, and std::nullptr_t are collectively called
scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRDATAMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
|| TYPE_PTRMEMFUNC_P (TYPE) \
|| NULLPTR_TYPE_P (TYPE))
/* Determines whether this type is a C++0x scoped enumeration
type. Scoped enumerations types are introduced via "enum class" or
"enum struct", e.g.,
enum class Color {
Red, Green, Blue
};
Scoped enumeration types are different from normal (unscoped)
enumeration types in several ways:
- The enumerators of a scoped enumeration type are only available
within the scope of the enumeration type and not in the
enclosing scope. For example, the Red color can be referred to
with "Color::Red" but not "Red".
- Scoped enumerators and enumerations do not implicitly convert
to integers or 'bool'.
- The underlying type of the enum is well-defined. */
#define SCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
/* Determine whether this is an unscoped enumeration type. */
#define UNSCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
enumeration type (1) or a normal (unscoped) enumeration type
(0). */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
(ENUM_IS_SCOPED (TYPE) = (VAL))
#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
(ENUM_IS_OPAQUE (TYPE) = (VAL))
#define OPAQUE_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
/* Determines whether an ENUMERAL_TYPE has an explicit
underlying type. */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
/* Returns the underlying type of the given enumeration type. The
underlying type is determined in different ways, depending on the
properties of the enum:
- In C++0x, the underlying type can be explicitly specified, e.g.,
enum E1 : char { ... } // underlying type is char
- In a C++0x scoped enumeration, the underlying type is int
unless otherwise specified:
enum class E2 { ... } // underlying type is int
- Otherwise, the underlying type is determined based on the
values of the enumerators. In this case, the
ENUM_UNDERLYING_TYPE will not be set until after the definition
of the enumeration is completed by finish_enum. */
#define ENUM_UNDERLYING_TYPE(TYPE) \
TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-provided
constructors, no brace-or-equal-initializers for non-static data
members, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
|| TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
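/* Illustrative sketch (hypothetical user code):
     struct A { int x; double y; };   // aggregate: A a { 1, 2.0 };
     struct B { B (); int x; };       // not an aggregate: user-provided ctor
   CP_AGGREGATE_TYPE_P would be expected to hold for `A' but not `B'. */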
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a
late-specified return type. */
#define TYPE_HAS_LATE_RETURN_TYPE(NODE) \
(TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE)))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is an init-list used as a direct-initializer, i.e.,
B b{1,2}, not B b({1,2}) or B b = {1,2}. */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
/* True if an uninitialized element in NODE should not be treated as
implicitly value-initialized. Only used in constexpr evaluation. */
#define CONSTRUCTOR_NO_IMPLICIT_ZERO(NODE) \
(TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE)))
/* True if this CONSTRUCTOR should not be used as a variable initializer
because it was loaded from a constexpr variable with mutable fields. */
#define CONSTRUCTOR_MUTABLE_POISON(NODE) \
(TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE)))
/* True if this typed CONSTRUCTOR represents C99 compound-literal syntax rather
than C++11 functional cast syntax. */
#define CONSTRUCTOR_C99_COMPOUND_LITERAL(NODE) \
(TREE_LANG_FLAG_3 (CONSTRUCTOR_CHECK (NODE)))
/* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the
CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with
CONSTRUCTOR_PLACEHOLDER_BOUNDARY. */
#define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \
(TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE)))
#define DIRECT_LIST_INIT_P(NODE) \
(BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))
/* True if NODE represents a conversion for direct-initialization in a
template. Set by perform_implicit_conversion_flags. */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
(TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* True if NODE represents a dependent conversion of a non-type template
argument. Set by maybe_convert_nontype_argument. */
#define IMPLICIT_CONV_EXPR_NONTYPE_ARG(NODE) \
(TREE_LANG_FLAG_1 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* Nonzero means that an object of this type cannot be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
/* Nonzero if there is no trivial default constructor for this class. */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
destructor. Therefore, destroying an object of this type will
involve a call to a destructor. This can apply to objects of
ARRAY_TYPE if the type of the elements needs a destructor. */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
(TYPE_LANG_FLAG_4 (NODE))
/* Nonzero for class type means that the default constructor is trivial. */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
(TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
(TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
(TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRDATAMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& !VOID_TYPE_P (NODE) \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function type. */
#define TYPE_PTRFN_P(NODE) \
(TYPE_PTR_P (NODE) \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function type. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a pointer to member function type. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE)))
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTRMEM_P(NODE) \
(TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Returns true if NODE is a pointer or a pointer-to-member. */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
(TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
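/* Illustrative sketch (hypothetical user code):
     struct X { int d; int f (double); };
     int X::*pd = &X::d;              // OFFSET_TYPE: TYPE_PTRDATAMEM_P
     int (X::*pf) (double) = &X::f;   // RECORD_TYPE wrapper: TYPE_PTRMEMFUNC_P
   Both types satisfy TYPE_PTRMEM_P and hence TYPE_PTR_OR_PTRMEM_P. */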
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\
cp_type_quals (NODE)))
/* As above, but can be used in places that want an lvalue at the expense
of not necessarily having the correct cv-qualifiers. */
#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)'. */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* The canonical internal RECORD_TYPE from the POINTER_TYPE to
METHOD_TYPE. */
#define TYPE_PTRMEMFUNC_TYPE(NODE) \
TYPE_LANG_SLOT_1 (NODE)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
For a type like `void (X::*)() const', this type is `X', not `const
X'. To get at the `const X' you have to look at the
TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
type `const X*'. */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TYPE_OFFSET_BASETYPE (NODE) \
: TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'. */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TREE_TYPE (NODE) \
: TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
`X'. */
#define PTRMEM_CST_CLASS(NODE) \
TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
`Y'. */
#define PTRMEM_CST_MEMBER(NODE) \
(((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE. */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
/* The type in question for an UNDERLYING_TYPE. */
#define UNDERLYING_TYPE_TYPE(NODE) \
(TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
/* The type in question for BASES. */
#define BASES_TYPE(NODE) \
(TYPE_VALUES_RAW (BASES_CHECK (NODE)))
#define BASES_DIRECT(NODE) \
TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
/* The expression in question for a DECLTYPE_TYPE. */
#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
id-expression or a member-access expression. When false, it was
parsed as a full expression. */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
(DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
/* These flags indicate that we want different semantics from normal
decltype: lambda capture just drops references, init capture
uses auto semantics, lambda proxies look through implicit dereference. */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_INIT_CAPTURE(NODE) \
TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_REF_CAPTURE(NODE) \
TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_EXTERN(NODE) \
DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_STATIC(NODE) \
DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a lambda capture
field for an array of runtime bound. */
#define DECL_VLA_CAPTURE_P(NODE) \
DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))
/* Nonzero for PARM_DECL node means that this is an array function
parameter, i.e., a[] rather than *a. */
#define DECL_ARRAY_PARAMETER_P(NODE) \
DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))
/* Nonzero for a FIELD_DECL whose NSDMI is currently being
instantiated. */
#define DECL_INSTANTIATING_NSDMI_P(NODE) \
DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
of the parent object, as opposed to a member field. */
#define DECL_FIELD_IS_BASE(NODE) \
DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a simple (no
explicit initializer) lambda capture field, making it invisible to
name lookup in unevaluated contexts. */
#define DECL_NORMAL_CAPTURE_P(NODE) \
DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
flag for this because "A union for which objects or pointers are
declared is not an anonymous union" [class.union]. */
#define ANON_AGGR_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type. */
#define ANON_UNION_TYPE_P(NODE) \
(TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
/* Define fields and accessors for nodes representing declared names. */
/* Nonzero if TYPE is an unnamed class with a typedef for linkage purposes. */
#define TYPE_WAS_UNNAMED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
/* The format of each node in the DECL_FRIENDLIST is as follows:
The TREE_PURPOSE will be the name of a function, i.e., an
IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
TREE_VALUEs are friends with the given name. */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
each node is a type; the TREE_VALUE is the access granted for this
DECL in that type. The DECL_ACCESS is set by access declarations.
For example, if a member that would normally be public in a
derived class is made protected, then the derived class and the
protected_access_node will appear in the DECL_ACCESS for the node. */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor. */
#define DECL_GLOBAL_CTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor. */
#define DECL_GLOBAL_DTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
/* Accessor macros for C++ template decl nodes. */
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is an INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
parameters. The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter). The TREE_PURPOSE is the default value, if any. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL).
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to a C++ class hierarchy. */
#define DECL_TEMPLATE_PARMS(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
#define DECL_NTPARMS(NODE) \
TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates.
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to a C++ class hierarchy. */
#define DECL_TEMPLATE_RESULT(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
lists all instantiations and specializations of the function so that
tsubst_friend_function can reassign them to another template if we find
that the namespace-scope template is really a partial instantiation of a
friend template.
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
all instantiations and specializations of the class type, including
partial instantiations and partial specializations, so that if we
explicitly specialize a partial instantiation we can walk the list
in maybe_process_partial_specialization and reassign them or complain
as appropriate.
In both cases, the TREE_PURPOSE of each node contains the arguments
used; the TREE_VALUE contains the generated variable. The template
arguments are always complete. For example, given:
template <class T> struct S1 {
template <class U> struct S2 {};
template <class U> struct S2<U*> {};
};
the record for the partial specialization will contain, as its
argument list, { {T}, {U*} }, and will be on the
DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
<class U> struct S1<T>::S2'.
This list is not used for other templates. */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE))
/* For a class template, this list contains the partial
specializations of this template. (Full specializations are not
recorded on this list.) The TREE_PURPOSE holds the arguments used
in the partial specialization (e.g., for `template <class T> struct
S<T*, int>' this will be `T*, int'.) The arguments will also include
any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL
for the partial specialization. The TREE_TYPE is the _TYPE node for
the partial specialization.
This list is not used for other templates. */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \
DECL_SIZE (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a DECL which is actually a template parameter. Keep
these checks in ascending tree code order. */
#define DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) \
&& (TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == PARM_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL))
/* Nonzero for a raw template parameter node. */
#define TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (NODE) == TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (NODE) == TEMPLATE_PARM_INDEX)
/* Mark NODE as a template parameter. */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter. */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero for a DECL that represents a function template. */
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a class template or alias
template. */
#define DECL_TYPE_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
/* Nonzero for a DECL that represents a class template. */
#define DECL_CLASS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a TEMPLATE_DECL that represents an alias template. */
#define DECL_ALIAS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
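/* Illustrative sketch (hypothetical user code): for
     template <class T> using ptr = T*;
   the TEMPLATE_DECL for `ptr' satisfies DECL_ALIAS_TEMPLATE_P: its
   DECL_TEMPLATE_RESULT is the user-written (non-artificial) TYPE_DECL for
   the alias, unlike a class template, whose result is the implicit
   typedef. */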
/* Nonzero for a NODE which declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
/* Nonzero if NODE declares a function. */
#define DECL_DECLARES_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
the type is declared. In C++, `struct S {};' is roughly
equivalent to `struct S {}; typedef struct S S;' in C.
DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
example. In C++, there is a second implicit typedef for each
class, called the injected-class-name, in the scope of `S' itself, so that
you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
(DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
(DECL_LANG_FLAG_4 (NODE) = 1)
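/* Informal illustration, a sketch only: given `struct S {};', the TYPE_DECL
   for S in the enclosing scope has DECL_IMPLICIT_TYPEDEF_P set, while the
   injected-class-name TYPE_DECL inside S (the one found when `S::S' is used
   to name the type) has DECL_SELF_REFERENCE_P set instead.  */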
/* A `primary' template is one that has its own template header and is not
a partial specialization. A member function of a class template is a
template, but not primary. A member template is primary. Friend
templates are primary, too. */
/* Returns the primary template corresponding to these parameters. */
#define DECL_PRIMARY_TEMPLATE(NODE) \
(TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template. */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
/* Nonzero iff NODE is a specialization of a template. The value
indicates the type of specializations:
1=implicit instantiation
2=partial or explicit specialization, e.g.:
template <> int min<int> (int, int),
3=explicit instantiation, e.g.:
template int min<int> (int, int);
Note that NODE will be marked as a specialization even if the
template it is instantiating is not a primary template. For
example, given:
template <typename T> struct O {
void f();
struct I {};
};
both O<int>::f and O<int>::I will be marked as instantiations.
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
be non-NULL. */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
/* Like DECL_USE_TEMPLATE, but for class types. */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template. */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
(CLASS_TYPE_P (NODE) \
&& CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) & 1)
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
template. */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 2)
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 1)
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 3)
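/* Informal illustration, a sketch only (example names are made up): given
     template <class T> T min (T a, T b);
     int i = min (1, 2);                            // min<int>: implicit instantiation (1)
     template <> long min<long> (long, long);       // explicit specialization (2)
     template double min<double> (double, double);  // explicit instantiation (3)
   DECL_USE_TEMPLATE on each generated min<...> has the value shown.  */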
/* Nonzero if DECL is a friend function which is an instantiation
from the point of view of the compiler, but not from the point of
view of the language. For example given:
template <class T> struct S { friend void f(T) {}; };
the declaration of `void f(int)' generated when S<int> is
instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
(DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \
&& !DECL_USE_TEMPLATE (DECL))
/* Nonzero if DECL is a function generated from a function 'temploid',
i.e. template, member of class template, or dependent friend. */
#define DECL_TEMPLOID_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INSTANTIATION (DECL) \
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
/* Nonzero if DECL is either defined implicitly by the compiler or
generated from a temploid. */
#define DECL_GENERATED_P(DECL) \
(DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
/* Nonzero iff we are currently processing a declaration for an
entity with its own template parameter list, and which is not a
full specialization. */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
(!processing_template_parmlist \
&& processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
instantiated, i.e. its definition has been generated from the
pattern given in the template. */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now. */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
so that assemble_external will work properly. So we have this flag to
tell us whether the decl is really not external.
This flag does not indicate whether or not the decl is defined in the
current translation unit; it indicates whether or not we should emit the
decl at the end of compilation if it is defined and needed. */
#define DECL_NOT_REALLY_EXTERN(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
(DECL_EXTERNAL (NODE) \
&& (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE)))
/* A thunk is a stub function: an alternate entry point for an ordinary
FUNCTION_DECL.
The address of the ordinary FUNCTION_DECL is given by the
DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
FUNCTION_DECL. The job of the thunk is to either adjust the this
pointer before transferring control to the FUNCTION_DECL, or call
FUNCTION_DECL and then adjust the result value. Note, the result
pointer adjusting thunk must perform a call to the thunked
function, (or be implemented via passing some invisible parameter
to the thunked function, which is modified to perform the
adjustment just before returning).
A thunk may perform either, or both, of the following operations:
o Adjust the this or result pointer by a constant offset.
o Adjust the this or result pointer by looking up a vcall or vbase offset
in the vtable.
A this pointer adjusting thunk converts from a base to a derived
class, and hence adds the offsets. A result pointer adjusting thunk
converts from a derived class to a base, and hence subtracts the
offsets. If both operations are performed, then the constant
adjustment is performed first for this pointer adjustment and last
for the result pointer adjustment.
The constant adjustment is given by THUNK_FIXED_OFFSET. If the
vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
used. For this pointer adjusting thunks, it is the vcall offset
into the vtable. For result pointer adjusting thunks it is the
binfo of the virtual base to convert to. Use that binfo's vbase
offset.
It is possible to have equivalent covariant thunks. These are
distinct virtual covariant thunks whose vbase offsets happen to
have the same value. THUNK_ALIAS is used to pick one as the
canonical thunk, which will get all the this pointer adjusting
thunks attached to it. */
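/* Informal illustration, a sketch only (example names are made up): given
     struct A { virtual void f (); int a; };
     struct B { virtual void g (); int b; };
     struct C : A, B { void g (); };
   the entry for g in the B-in-C vtable is a `this'-adjusting thunk: it
   adjusts the incoming B* by the offset of the B subobject within C (a
   THUNK_FIXED_OFFSET) and then transfers control to C::g.  A covariant
   override such as
     struct D { virtual B *h (); };
     struct E : D { C *h (); };
   may additionally need a result-adjusting thunk that converts the
   returned C* back to a B* after the call.  */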
/* An integer indicating how many bytes should be subtracted from the
this or result pointer when this function is called. */
#define THUNK_FIXED_OFFSET(DECL) \
(DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
adjusting thunk it is the number of bytes to be added to the vtable
to find the vcall offset. For a result adjusting thunk, it is the
binfo of the relevant virtual base. If NULL, then there is no
virtual adjust. (The vptr is always located at offset zero from
the this or result pointer.) (If the covariant type is within the
class hierarchy being laid out, the vbase index is not yet known
at the point we need to create the thunks, hence the need to use
binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
(LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk. */
#define THUNK_ALIAS(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
possible for the target to be a thunk too. */
#define THUNK_TARGET(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* True for a SCOPE_REF iff the "template" keyword was used to
indicate that the qualified name denotes a template. */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
(TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters. These are stored
as an expr in operand 1, and integer_zero_node in operand 0. */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
(TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)
/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
(TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
hold code (or code fragments) related to ctors, dtors, and op=.
See semantics.c for details. */
#define CP_OMP_CLAUSE_INFO(NODE) \
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_LINEAR))
/* Nonzero if this transaction expression's body contains statements. */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
/* These macros provide convenient access to the various _STMT nodes
created when parsing template declarations. */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block. */
#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
and the VAR_DECL for which this cleanup exists. */
#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors. These give access to the condition of the if
statement, the then block of the if statement, and the else block
of the if statement if it exists. */
#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
#define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE))
/* Like PACK_EXPANSION_EXTRA_ARGS, for constexpr if. IF_SCOPE is used while
building an IF_STMT; IF_STMT_EXTRA_ARGS is used after it is complete. */
#define IF_STMT_EXTRA_ARGS(NODE) IF_SCOPE (NODE)
/* WHILE_STMT accessors. These give access to the condition of the
while statement and the body of the while statement, respectively. */
#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* DO_STMT accessors. These give access to the condition of the do
statement and the body of the do statement, respectively. */
#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* FOR_STMT accessors. These give access to the init statement,
condition, update expression, and body of the for statement,
respectively. */
#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* RANGE_FOR_STMT accessors. These give access to the declarator,
expression, body, and scope of the statement, respectively. */
#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
#define RANGE_FOR_UNROLL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4)
#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
/* True if there are case labels for all possible values of switch cond, either
because there is a default: case label or because the case label ranges cover
all values. */
#define SWITCH_STMT_ALL_CASES_P(NODE) \
TREE_LANG_FLAG_0 (SWITCH_STMT_CHECK (NODE))
/* True if the body of a switch stmt contains no BREAK_STMTs. */
#define SWITCH_STMT_NO_BREAK_P(NODE) \
TREE_LANG_FLAG_2 (SWITCH_STMT_CHECK (NODE))
/* STMT_EXPR accessor. */
#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
expression statement. */
#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
discard it if it isn't useful. */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR is the result of list-initialization of a
temporary. */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR expresses direct-initialization of an object
to be named later. */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
/* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if
the initializer has void type, it's doing something more complicated. */
#define SIMPLE_TARGET_EXPR_P(NODE) \
(TREE_CODE (NODE) == TARGET_EXPR \
&& !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE))))
/* True if EXPR expresses direct-initialization of a TYPE. */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
(TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
&& same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
/* True if this CONVERT_EXPR is for a conversion to virtual base in
an NSDMI, and should be re-evaluated when used in a constructor. */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
/* True if SIZEOF_EXPR argument is type. */
#define SIZEOF_EXPR_TYPE_P(NODE) \
TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
/* True if the ALIGNOF_EXPR was spelled "alignof". */
#define ALIGNOF_EXPR_STD_P(NODE) \
TREE_LANG_FLAG_0 (ALIGNOF_EXPR_CHECK (NODE))
/* An enumeration of the kind of tags that C++ accepts. */
enum tag_types {
none_type = 0, /* Not a tag type. */
record_type, /* "struct" types. */
class_type, /* "class" types. */
union_type, /* "union" types. */
enum_type, /* "enum" types. */
typename_type, /* "typename" types. */
scope_type /* namespace or tagged type name followed by :: */
};
/* The various kinds of lvalues we distinguish. */
enum cp_lvalue_kind_flags {
clk_none = 0, /* Things that are not an lvalue. */
clk_ordinary = 1, /* An ordinary lvalue. */
clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */
clk_class = 4, /* A prvalue of class or array type. */
clk_bitfield = 8, /* An lvalue for a bit-field. */
clk_packed = 16 /* An lvalue for a packed field. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum cp_lvalue_kind_flags. */
typedef int cp_lvalue_kind;
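/* Informal illustration, a sketch only: for `struct S { int bf : 3; } s;',
   the expression `s' is clk_ordinary, `static_cast<S&&> (s)' is
   clk_rvalueref (an xvalue), `S ()' is clk_class (a class prvalue), and
   `s.bf' is clk_bitfield.  */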
/* Various kinds of template specialization, instantiation, etc. */
enum tmpl_spec_kind {
tsk_none, /* Not a template at all. */
tsk_invalid_member_spec, /* An explicit member template
specialization, but the enclosing
classes have not all been explicitly
specialized. */
tsk_invalid_expl_inst, /* An explicit instantiation containing
template parameter lists. */
tsk_excessive_parms, /* A template declaration with too many
template parameter lists. */
tsk_insufficient_parms, /* A template declaration with too few
parameter lists. */
tsk_template, /* A template declaration. */
tsk_expl_spec, /* An explicit specialization. */
tsk_expl_inst /* An explicit instantiation. */
};
/* The various kinds of access. BINFO_ACCESS depends on these being
two-bit quantities. The numerical values are important; they are
used to initialize RTTI data structures, so changing them changes
the ABI. */
enum access_kind {
ak_none = 0, /* Inaccessible. */
ak_public = 1, /* Accessible, as a `public' thing. */
ak_protected = 2, /* Accessible, as a `protected' thing. */
ak_private = 3 /* Accessible, as a `private' thing. */
};
/* The various kinds of special functions. If you add to this list,
you should update special_function_p as well. */
enum special_function_kind {
sfk_none = 0, /* Not a special function. This enumeral
must have value zero; see
special_function_p. */
sfk_constructor, /* A constructor. */
sfk_copy_constructor, /* A copy constructor. */
sfk_move_constructor, /* A move constructor. */
sfk_copy_assignment, /* A copy assignment operator. */
sfk_move_assignment, /* A move assignment operator. */
sfk_destructor, /* A destructor. */
sfk_complete_destructor, /* A destructor for complete objects. */
sfk_base_destructor, /* A destructor for base subobjects. */
sfk_deleting_destructor, /* A destructor for complete objects that
deletes the object after it has been
destroyed. */
sfk_conversion, /* A conversion operator. */
sfk_deduction_guide, /* A class template deduction guide. */
sfk_inheriting_constructor /* An inheriting constructor */
};
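/* Informal illustration, a sketch only: within `struct S', the members
   S(), S(const S&), S(S&&), operator=(const S&), operator=(S&&), ~S()
   and operator int() correspond to sfk_constructor, sfk_copy_constructor,
   sfk_move_constructor, sfk_copy_assignment, sfk_move_assignment,
   sfk_destructor and sfk_conversion, respectively.  */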
/* The various kinds of linkage. From [basic.link],
A name is said to have linkage when it might denote the same
object, reference, function, type, template, namespace or value
as a name introduced in another scope:
-- When a name has external linkage, the entity it denotes can
be referred to from scopes of other translation units or from
other scopes of the same translation unit.
-- When a name has internal linkage, the entity it denotes can
be referred to by names from other scopes in the same
translation unit.
-- When a name has no linkage, the entity it denotes cannot be
referred to by names from other scopes. */
enum linkage_kind {
lk_none, /* No linkage. */
lk_internal, /* Internal linkage. */
lk_external /* External linkage. */
};
enum duration_kind {
dk_static,
dk_thread,
dk_auto,
dk_dynamic
};
/* Bitmask flags to control type substitution. */
enum tsubst_flags {
tf_none = 0, /* nothing special */
tf_error = 1 << 0, /* give error messages */
tf_warning = 1 << 1, /* give warnings too */
tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
tf_keep_type_decl = 1 << 3, /* retain typedef type decls
(make_typename_type use) */
tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
instantiate_type use) */
tf_user = 1 << 5, /* found template must be a user template
(lookup_template_class use) */
tf_conv = 1 << 6, /* We are determining what kind of
conversion might be permissible,
not actually performing the
conversion. */
tf_decltype = 1 << 7, /* We are the operand of decltype.
Used to implement the special rules
for calls in decltype (5.2.2/11). */
tf_partial = 1 << 8, /* Doing initial explicit argument
substitution in fn_type_unification. */
tf_fndecl_type = 1 << 9, /* Substituting the type of a function
declaration. */
tf_no_cleanup = 1 << 10, /* Do not build a cleanup
(build_target_expr and friends) */
/* Convenient substitution flags combinations. */
tf_warning_or_error = tf_warning | tf_error
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum tsubst_flags. */
typedef int tsubst_flags_t;
/* The kind of checking we can do looking in a class hierarchy. */
enum base_access_flags {
ba_any = 0, /* Do not check access, allow an ambiguous base,
prefer a non-virtual base */
ba_unique = 1 << 0, /* Must be a unique base. */
ba_check_bit = 1 << 1, /* Check access. */
ba_check = ba_unique | ba_check_bit,
ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum base_access_flags. */
typedef int base_access;
/* The various kinds of access check during parsing. */
enum deferring_kind {
dk_no_deferred = 0, /* Check access immediately */
dk_deferred = 1, /* Deferred check */
dk_no_check = 2 /* No access check */
};
/* The kind of base we can find, looking in a class hierarchy.
Values <0 indicate we failed. */
enum base_kind {
bk_inaccessible = -3, /* The base is inaccessible */
bk_ambig = -2, /* The base is ambiguous */
bk_not_base = -1, /* It is not a base */
bk_same_type = 0, /* It is the same type */
bk_proper_base = 1, /* It is a proper base */
bk_via_virtual = 2 /* It is a proper base, but via a virtual
path. This might not be the canonical
binfo. */
};
/* Node for "pointer to (virtual) function".
This may be distinct from ptr_type_node so gdb can distinguish them. */
#define vfunc_ptr_type_node vtable_entry_type
/* For building calls to `delete'. */
extern GTY(()) tree integer_two_node;
/* The number of function bodies which we are currently processing.
(Zero if we are at namespace scope, one inside the body of a
function, two inside the body of a function in a local class, etc.) */
extern int function_depth;
/* Nonzero if we are inside eq_specializations, which affects comparison of
PARM_DECLs in cp_tree_equal. */
extern int comparing_specializations;
/* In parser.c. */
/* Nonzero if we are parsing an unevaluated operand: an operand to
sizeof, typeof, or alignof. This is a count since operands to
sizeof can be nested. */
extern int cp_unevaluated_operand;
/* RAII class used to inhibit the evaluation of operands during parsing
and template instantiation. Evaluation warnings are also inhibited. */
struct cp_unevaluated
{
cp_unevaluated ();
~cp_unevaluated ();
};
/* in pt.c */
/* These values are used for the `STRICT' parameter to type_unification and
fn_type_unification. Their meanings are described with the
documentation for fn_type_unification. */
enum unification_kind_t {
DEDUCE_CALL,
DEDUCE_CONV,
DEDUCE_EXACT
};
// An RAII class used to create a new pointer map for local
// specializations. When the stack goes out of scope, the
// previous pointer map is restored.
enum lss_policy { lss_blank, lss_copy };
struct local_specialization_stack
{
local_specialization_stack (lss_policy = lss_blank);
~local_specialization_stack ();
hash_map<tree, tree> *saved;
};
/* in class.c */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
declaration order. */
extern GTY(()) vec<tree, va_gc> *local_classes;
/* in decl.c */
/* An array of static vars & fns. */
extern GTY(()) vec<tree, va_gc> *static_decls;
/* An array of vtable-needing types that have no key function, or have
an emitted key function. */
extern GTY(()) vec<tree, va_gc> *keyed_classes;
/* Here's where we control how name mangling takes place. */
/* Cannot use '$' up front, because this confuses gdb
(names beginning with '$' are gdb-local identifiers).
Note that all forms in which the '$' is significant are long enough
for direct indexing (meaning that if we know there is a '$'
at a particular location, we can index into the string at
any other location that provides distinguishing characters). */
/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
doesn't allow '.' in symbol names. */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#else /* NO_DOT_IN_LABEL */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#else /* NO_DOLLAR_IN_LABEL */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), \
UDLIT_OP_ANSI_PREFIX, \
sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
(IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
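/* Informal illustration, a sketch only (the suffix _km is made up): for
     unsigned long long operator"" _km (unsigned long long);
   the IDENTIFIER_POINTER is "operator\"\"_km", so UDLIT_OPER_P holds and
   UDLIT_OP_SUFFIX yields "_km"; the mangled form of the operator name is
   built with the "li" prefix from UDLIT_OP_MANGLED_PREFIX.  */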
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
&& IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
&& IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
/* Nonzero if we're done parsing and into end-of-file activities.
Two if we're done with front-end processing. */
extern int at_eof;
/* True if note_mangling_alias should enqueue mangling aliases for
later generation, rather than emitting them right away. */
extern bool defer_mangling_aliases;
/* True if noexcept is part of the type (i.e. in C++17). */
extern bool flag_noexcept_type;
/* A list of namespace-scope objects which have constructors or
destructors which reside in the global scope. The decl is stored
in the TREE_VALUE slot and the initializer is stored in the
TREE_PURPOSE slot. */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage. */
extern GTY(()) tree tls_aggregates;
enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };
/* These are used as bits in flags passed to various functions to
control their behavior. Despite the LOOKUP_ prefix, many of these
do not control name lookup. ??? Functions using these flags should
probably be modified to accept explicit boolean flags for the
behaviors relevant to them. */
/* Check for access violations. */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
should be called directly. */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried. This flag
indicates that we are not performing direct-initialization. */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
as long as the current variable bindings; otherwise it only lives
until the end of the complete-expression. It also forces
direct-initialization in cases where other parts of the compiler
have already generated a temporary, such as reference
initialization and the catch parameter. */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
conversions are not permitted (only built-in conversions). */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor. (Therefore, we do
not need to check that the object is non-NULL before calling the
destructor.) */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries. */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces. */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types. */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces. */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
(Normally, these entities are registered in the symbol table, but
not found by lookup.) */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* We're trying to treat an lvalue as an rvalue. */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed. */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization. */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor. */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors. */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
Used by synthesized_method_walk to determine which functions will
be called to initialize subobjects, in order to determine exception
specification and possible implicit delete.
This is kind of a hack, but exiting early avoids problems with trying
to perform argument conversions when the class isn't complete yet. */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
cycles trying to declare them (core issue 1092). */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
digest_init. */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments. */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
/* Used by case_conversion to disregard non-integral conversions. */
#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
/* Used for delegating constructors in order to diagnose self-delegation. */
#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)
#define LOOKUP_NAMESPACES_ONLY(F) \
(((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
(!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
/* These flags are used by the conversion code.
CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
CONV_STATIC : Perform the explicit conversions for static_cast.
CONV_CONST : Perform the explicit conversions for const_cast.
CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
CONV_PRIVATE : Perform upcasts to private bases.
CONV_FORCE_TEMP : Require a new temporary when converting to the same
aggregate type. */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
#define CONV_FOLD 128
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
#define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD)
/* Used by build_expr_type_conversion to indicate which types are
acceptable as arguments to the expression under consideration. */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
/* Used with comptypes, and related functions, to guide type
comparison. */
#define COMPARE_STRICT 0 /* Just check if the types are the
same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
another declaration of an existing
entity is seen. */
#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
structural. The actual comparison
will be identical to
COMPARE_STRICT. */
/* Used with start function. */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
in the class body. */
/* Used with start_decl's initialized parameter. */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED 1
#define SD_DEFAULTED 2
#define SD_DELETED 3
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
class derived from the type pointed to (referred to) by TYPE1. */
#define same_or_base_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX. */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
(TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
TEMPLATE_TEMPLATE_PARM, \
BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
(TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
(TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
(TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
(TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
(TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* For a C++17 class deduction placeholder, the template it represents. */
#define CLASS_PLACEHOLDER_TEMPLATE(NODE) \
(DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE))))
/* Contexts in which auto deduction occurs. These flags are
used to control diagnostics in do_auto_deduction. */
enum auto_deduction_context
{
adc_unspecified, /* Not given */
adc_variable_type, /* Variable initializer deduction */
adc_return_type, /* Return type deduction */
adc_unify, /* Template argument deduction */
adc_requirement, /* Argument deduction constraint */
adc_decomp_type /* Decomposition declaration initializer deduction */
};
/* True if this type-parameter belongs to a class template, used by C++17
class template argument deduction. */
#define TEMPLATE_TYPE_PARM_FOR_CLASS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */
#define AUTO_IS_DECLTYPE(NODE) \
(TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* These constants can be used as bit flags in the process of tree formatting.
TFF_PLAIN_IDENTIFIER: unqualified part of a name.
TFF_SCOPE: include the class and namespace scope of the name.
TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
TFF_DECL_SPECIFIERS: print decl-specifiers.
TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
a class-key (resp. `enum').
TFF_RETURN_TYPE: include function return type.
TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
TFF_EXCEPTION_SPECIFICATION: show function exception specification.
TFF_TEMPLATE_HEADER: show the template<...> header in a
template-declaration.
TFF_TEMPLATE_NAME: show only template-name.
TFF_EXPR_IN_PARENS: parenthesize expressions.
TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
top-level entity.
TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
identical to their defaults.
TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
arguments for a function template specialization.
TFF_POINTER: we are printing a pointer type. */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
#define TFF_UNQUALIFIED_NAME (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS (1 << 13)
#define TFF_POINTER (1 << 14)
/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
node. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TYPE_TI_TEMPLATE (NODE) \
: TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Various flags for the overloaded operator information. */
enum ovl_op_flags
{
OVL_OP_FLAG_NONE = 0, /* Don't care. */
OVL_OP_FLAG_UNARY = 1, /* Is unary. */
OVL_OP_FLAG_BINARY = 2, /* Is binary. */
OVL_OP_FLAG_AMBIARY = 3, /* May be unary or binary. */
OVL_OP_FLAG_ALLOC = 4, /* operator new or delete. */
OVL_OP_FLAG_DELETE = 1, /* operator delete. */
OVL_OP_FLAG_VEC = 2 /* vector new or delete. */
};
/* Compressed operator codes. Order is determined by operators.def
and does not match that of tree_codes. */
enum ovl_op_code
{
OVL_OP_ERROR_MARK,
OVL_OP_NOP_EXPR,
#define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) OVL_OP_##CODE,
#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) /* NOTHING */
#include "operators.def"
OVL_OP_MAX
};
struct GTY(()) ovl_op_info_t {
/* The IDENTIFIER_NODE for the operator. */
tree identifier;
/* The name of the operator. */
const char *name;
/* The mangled name of the operator. */
const char *mangled_name;
/* The (regular) tree code. */
enum tree_code tree_code : 16;
/* The (compressed) operator code. */
enum ovl_op_code ovl_op_code : 8;
/* The ovl_op_flags of the operator */
unsigned flags : 8;
};
/* Overloaded operator info indexed by ass_op_p & ovl_op_code. */
extern GTY(()) ovl_op_info_t ovl_op_info[2][OVL_OP_MAX];
/* Mapping from tree_codes to ovl_op_codes. */
extern GTY(()) unsigned char ovl_op_mapping[MAX_TREE_CODES];
/* Mapping for ambi-ary operators from the binary to the unary. */
extern GTY(()) unsigned char ovl_op_alternate[OVL_OP_MAX];
/* Given an ass_op_p boolean and a tree code, return a pointer to its
overloaded operator info. Tree codes for non-overloaded operators
map to the error-operator. */
#define OVL_OP_INFO(IS_ASS_P, TREE_CODE) \
(&ovl_op_info[(IS_ASS_P) != 0][ovl_op_mapping[(TREE_CODE)]])
/* Overloaded operator info for an identifier for which
IDENTIFIER_OVL_OP_P is true. */
#define IDENTIFIER_OVL_OP_INFO(NODE) \
(&ovl_op_info[IDENTIFIER_KIND_BIT_0 (NODE)][IDENTIFIER_CP_INDEX (NODE)])
#define IDENTIFIER_OVL_OP_FLAGS(NODE) \
(IDENTIFIER_OVL_OP_INFO (NODE)->flags)
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
constants. */
typedef int cp_cv_quals;
/* Non-static member functions have an optional virt-specifier-seq.
There is a VIRT_SPEC value for each virt-specifier.
They can be combined by bitwise-or to form the complete set of
virt-specifiers for a member function. */
enum virt_specifier
{
VIRT_SPEC_UNSPECIFIED = 0x0,
VIRT_SPEC_FINAL = 0x1,
VIRT_SPEC_OVERRIDE = 0x2
};
/* A virt-specifier, or bitmask thereof, using the VIRT_SPEC
constants. */
typedef int cp_virt_specifiers;
/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:
[dcl.fct]
The return type, the parameter-type-list, the ref-qualifier, and
the cv-qualifier-seq, but not the default arguments or the exception
specification, are part of the function type.
REF_QUAL_NONE Ordinary member function with no ref-qualifier
REF_QUAL_LVALUE Member function with the &-ref-qualifier
REF_QUAL_RVALUE Member function with the &&-ref-qualifier */
enum cp_ref_qualifier {
REF_QUAL_NONE = 0,
REF_QUAL_LVALUE = 1,
REF_QUAL_RVALUE = 2
};
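/* Informal illustration, a sketch only: in
     struct S {
       void f () &;     // REF_QUAL_LVALUE
       void g () &&;    // REF_QUAL_RVALUE
       void h ();       // REF_QUAL_NONE
     };
   the ref-qualifier is part of each member function's type, per the
   [dcl.fct] wording quoted above.  */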
/* A storage class. */
enum cp_storage_class {
/* sc_none must be zero so that zeroing a cp_decl_specifier_seq
sets the storage_class field to sc_none. */
sc_none = 0,
sc_auto,
sc_register,
sc_static,
sc_extern,
sc_mutable
};
/* An individual decl-specifier. This is used to index the array of
locations for the declspecs in struct cp_decl_specifier_seq
below. */
enum cp_decl_spec {
ds_first,
ds_signed = ds_first,
ds_unsigned,
ds_short,
ds_long,
ds_const,
ds_volatile,
ds_restrict,
ds_inline,
ds_virtual,
ds_explicit,
ds_friend,
ds_typedef,
ds_alias,
ds_constexpr,
ds_complex,
ds_thread,
ds_type_spec,
ds_redefined_builtin_type_spec,
ds_attribute,
ds_std_attribute,
ds_storage_class,
ds_long_long,
ds_concept,
ds_last /* This enumerator must always be the last one. */
};
/* A decl-specifier-seq. */
struct cp_decl_specifier_seq {
/* An array of locations for the declaration specifiers, indexed by
enum cp_decl_spec_word. */
source_location locations[ds_last];
/* The primary type, if any, given by the decl-specifier-seq.
Modifiers, like "short", "const", and "unsigned" are not
reflected here. This field will be a TYPE, unless a typedef-name
was used, in which case it will be a TYPE_DECL. */
tree type;
/* The attributes, if any, provided with the specifier sequence. */
tree attributes;
/* The C++11 attributes that follow the type specifier. */
tree std_attributes;
/* If non-NULL, a built-in type that the user attempted to redefine
to some other type. */
tree redefined_builtin_type;
/* The storage class specified -- or sc_none if no storage class was
explicitly specified. */
cp_storage_class storage_class;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* True iff TYPE_SPEC defines a class or enum. */
BOOL_BITFIELD type_definition_p : 1;
/* True iff multiple types were (erroneously) specified for this
decl-specifier-seq. */
BOOL_BITFIELD multiple_types_p : 1;
/* True iff multiple storage classes were (erroneously) specified
for this decl-specifier-seq or a combination of a storage class
with a typedef specifier. */
BOOL_BITFIELD conflicting_specifiers_p : 1;
/* True iff at least one decl-specifier was found. */
BOOL_BITFIELD any_specifiers_p : 1;
/* True iff at least one type-specifier was found. */
BOOL_BITFIELD any_type_specifiers_p : 1;
/* True iff "int" was explicitly provided. */
BOOL_BITFIELD explicit_int_p : 1;
/* True iff "__intN" was explicitly provided. */
BOOL_BITFIELD explicit_intN_p : 1;
/* True iff "char" was explicitly provided. */
BOOL_BITFIELD explicit_char_p : 1;
/* True iff ds_thread is set for __thread, not thread_local. */
BOOL_BITFIELD gnu_thread_keyword_p : 1;
/* True iff the type is a decltype. */
BOOL_BITFIELD decltype_p : 1;
};
/* The various kinds of declarators. */
enum cp_declarator_kind {
cdk_id,
cdk_function,
cdk_array,
cdk_pointer,
cdk_reference,
cdk_ptrmem,
cdk_decomp,
cdk_error
};
/* A declarator. */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed. */
struct cp_parameter_declarator {
/* The next parameter, or NULL_TREE if none. */
cp_parameter_declarator *next;
/* The decl-specifiers-seq for the parameter. */
cp_decl_specifier_seq decl_specifiers;
/* The declarator for the parameter. */
cp_declarator *declarator;
/* The default-argument expression, or NULL_TREE, if none. */
tree default_argument;
/* True iff this is a template parameter pack. */
bool template_parameter_pack_p;
/* Location within source. */
location_t loc;
};
/* A declarator. */
struct cp_declarator {
/* The kind of declarator. */
ENUM_BITFIELD (cp_declarator_kind) kind : 4;
/* Whether we parsed an ellipsis (`...') just before the declarator,
to indicate this is a parameter pack. */
BOOL_BITFIELD parameter_pack_p : 1;
/* If this declarator is parenthesized, this is the location of the open paren. It is
UNKNOWN_LOCATION when not parenthesized. */
location_t parenthesized;
location_t id_loc; /* Currently only set for cdk_id, cdk_decomp and
cdk_function. */
/* GNU Attributes that apply to this declarator. If the declarator
is a pointer or a reference, these attributes apply to the type
pointed to. */
tree attributes;
/* Standard C++11 attributes that apply to this declarator. If the
declarator is a pointer or a reference, these attributes apply
to the pointer, rather than to the type pointed to. */
tree std_attributes;
/* For all but cdk_id, cdk_decomp and cdk_error, the contained declarator.
For cdk_id, cdk_decomp and cdk_error, guaranteed to be NULL. */
cp_declarator *declarator;
union {
/* For identifiers. */
struct {
/* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
*_TYPE) for this identifier. */
tree qualifying_scope;
/* The unqualified name of the entity -- an IDENTIFIER_NODE,
BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */
tree unqualified_name;
/* If this is the name of a function, what kind of special
function (if any). */
special_function_kind sfk;
} id;
/* For functions. */
struct {
/* The parameters to the function as a TREE_LIST of decl/default. */
tree parameters;
/* The cv-qualifiers for the function. */
cp_cv_quals qualifiers;
/* The virt-specifiers for the function. */
cp_virt_specifiers virt_specifiers;
/* The ref-qualifier for the function. */
cp_ref_qualifier ref_qualifier;
/* The transaction-safety qualifier for the function. */
tree tx_qualifier;
/* The exception-specification for the function. */
tree exception_specification;
/* The late-specified return type, if any. */
tree late_return_type;
/* The trailing requires-clause, if any. */
tree requires_clause;
} function;
/* For arrays. */
struct {
/* The bounds to the array. */
tree bounds;
} array;
/* For cdk_pointer and cdk_ptrmem. */
struct {
/* The cv-qualifiers for the pointer. */
cp_cv_quals qualifiers;
/* For cdk_ptrmem, the class type containing the member. */
tree class_type;
} pointer;
/* For cdk_reference */
struct {
/* The cv-qualifiers for the reference. These qualifiers are
only used to diagnose ill-formed code. */
cp_cv_quals qualifiers;
/* Whether this is an rvalue reference */
bool rvalue_ref;
} reference;
} u;
};
/* A level of template instantiation. */
struct GTY((chain_next ("%h.next"))) tinst_level {
/* The immediately deeper level in the chain. */
struct tinst_level *next;
/* The original node. TLDCL can be a DECL (for a function or static
data member), a TYPE (for a class), depending on what we were
asked to instantiate, or a TREE_LIST with the template as PURPOSE
and the template args as VALUE, if we are substituting for
overload resolution. In all these cases, TARGS is NULL.
However, to avoid creating TREE_LIST objects for substitutions if
we can help it, we store PURPOSE and VALUE in TLDCL and TARGS,
respectively. So TLDCL stands for TREE_LIST or DECL (the
template is a DECL too), whereas TARGS stands for the template
arguments. */
tree tldcl, targs;
private:
/* Return TRUE iff the original node is a split list. */
bool split_list_p () const { return targs; }
/* Return TRUE iff the original node is a TREE_LIST object. */
bool tree_list_p () const
{
return !split_list_p () && TREE_CODE (tldcl) == TREE_LIST;
}
/* Return TRUE iff the original node is not a list, split or not. */
bool not_list_p () const
{
return !split_list_p () && !tree_list_p ();
}
/* Convert (in place) the original node from a split list to a
TREE_LIST. */
tree to_list ();
public:
/* Release storage for OBJ and node, if it's a TREE_LIST. */
static void free (tinst_level *obj);
/* Return TRUE iff the original node is a list, split or not. */
bool list_p () const { return !not_list_p (); }
/* Return the original node; if it's a split list, make it a
TREE_LIST first, so that it can be returned as a single tree
object. */
tree get_node () {
if (!split_list_p ()) return tldcl;
else return to_list ();
}
/* Return the original node if it's a DECL or a TREE_LIST, but do
NOT convert a split list to a TREE_LIST: return NULL instead. */
tree maybe_get_node () const {
if (!split_list_p ()) return tldcl;
else return NULL_TREE;
}
/* The location where the template is instantiated. */
location_t locus;
/* errorcount + sorrycount when we pushed this level. */
unsigned short errors;
/* Count references to this object. If refcount reaches
refcount_infinity value, we don't increment or decrement the
refcount anymore, as the refcount isn't accurate anymore.
The object can still be garbage collected if unreferenced from
anywhere, which might keep referenced objects referenced longer than
otherwise necessary. Hitting the infinity is rare though. */
unsigned short refcount;
/* Infinity value for the above refcount. */
static const unsigned short refcount_infinity = (unsigned short) ~0;
};
bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
/* Return the type of the `this' parameter of FNTYPE. */
inline tree
type_of_this_parm (const_tree fntype)
{
function_args_iterator iter;
gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
function_args_iter_init (&iter, fntype);
return function_args_iter_cond (&iter);
}
/* Return the class of the `this' parameter of FNTYPE. */
inline tree
class_of_this_parm (const_tree fntype)
{
return TREE_TYPE (type_of_this_parm (fntype));
}
/* True iff T is a variable template declaration. */
inline bool
variable_template_p (tree t)
{
if (TREE_CODE (t) != TEMPLATE_DECL)
return false;
if (!PRIMARY_TEMPLATE_P (t))
return false;
if (tree r = DECL_TEMPLATE_RESULT (t))
return VAR_P (r);
return false;
}
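/* Informal illustration, a sketch only: for
     template <class T> constexpr T pi = T (3.1415926535897932385L);
   DECL_TEMPLATE_RESULT of the TEMPLATE_DECL is a VAR_DECL, so
   variable_template_p returns true; declaring it with the concept
   specifier would additionally make variable_concept_p return true.  */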
/* True iff T is a variable concept definition. That is, T is
a variable template declared with the concept specifier. */
inline bool
variable_concept_p (tree t)
{
if (TREE_CODE (t) != TEMPLATE_DECL)
return false;
if (tree r = DECL_TEMPLATE_RESULT (t))
return VAR_P (r) && DECL_DECLARED_CONCEPT_P (r);
return false;
}
/* True iff T is a concept definition. That is, T is a variable or function
template declared with the concept specifier. */
inline bool
concept_template_p (tree t)
{
if (TREE_CODE (t) != TEMPLATE_DECL)
return false;
if (tree r = DECL_TEMPLATE_RESULT (t))
return VAR_OR_FUNCTION_DECL_P (r) && DECL_DECLARED_CONCEPT_P (r);
return false;
}
/* A parameter list for a function with no parameters,
   e.g. "int f(void)".  */
extern cp_parameter_declarator *no_parameters;
/* Various dump ids. */
extern int class_dump_id;
extern int raw_dump_id;
/* in call.c */
extern bool check_dtor_name (tree, tree);
int magic_varargs_p (tree);
extern tree build_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree extract_call_expr (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **,
tree *, tree *, tree, tree,
tree *, tsubst_flags_t);
extern tree build_new_method_call (tree, tree,
vec<tree, va_gc> **, tree,
int, tree *, tsubst_flags_t);
extern tree build_special_member_call (tree, tree,
vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool aligned_allocation_fn_p (tree);
extern bool usual_deallocation_fn_p (tree);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_standard (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int,
tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int,
tsubst_flags_t);
extern location_t get_fndecl_argument_location (tree, int);
/* A class for recording information about access failures (e.g. private
fields), so that we can potentially supply a fix-it hint about
an accessor (from a context in which the constness of the object
is known). */
class access_failure_info
{
public:
access_failure_info () : m_was_inaccessible (false), m_basetype_path (NULL_TREE),
m_field_decl (NULL_TREE) {}
void record_access_failure (tree basetype_path, tree field_decl);
void maybe_suggest_accessor (bool const_p) const;
private:
bool m_was_inaccessible;
tree m_basetype_path;
tree m_field_decl;
};
extern bool enforce_access (tree, tree, tree,
tsubst_flags_t,
access_failure_info *afi = NULL);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int,
tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (source_location, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern int remaining_arguments (tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_converted_constant_expr (tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
extern void validate_conversion_obstack (void);
extern void mark_versions_used (tree);
extern tree get_function_version_dispatcher (tree);
/* in class.c */
extern tree build_vfield_ref (tree, tree);
extern tree build_if_in_charge (tree true_stmt, tree false_stmt = void_node);
extern tree build_base_path (enum tree_code, tree,
tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool,
tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern bool add_method (tree, tree, bool);
extern tree declared_access (tree);
extern tree currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree outermost_open_class (void);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern bool vptr_via_virtual_p (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool default_ctor_p (tree);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_non_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared);
extern bool classtype_has_non_deleted_move_ctor (tree);
extern bool type_build_ctor_call (tree);
extern bool type_build_dtor_call (tree);
extern void explain_non_literal_class (tree);
extern void inherit_targ_abi_tags (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void check_abi_tags (tree);
extern tree missing_abi_tags (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree* decl_cloned_function_p (const_tree, bool);
extern void clone_function_decl (tree, bool);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
extern tree common_enclosing_class (tree, tree);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree,
tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int,
tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree cp_get_callee (tree);
extern tree cp_get_callee_fndecl (tree);
extern tree cp_get_callee_fndecl_nofold (tree);
extern tree cp_get_fndecl_from_callee (tree, bool fold = true);
extern tree convert_to_void (tree, impl_conv_void,
tsubst_flags_t);
extern tree convert_force (tree, tree, int,
tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern bool can_convert_qual (tree, tree);
extern tree perform_qualification_conversions (tree, tree);
extern bool tx_safe_fn_type_p (tree);
extern tree tx_unsafe_fn_variant (tree);
extern bool fnptr_conv_p (tree, tree);
extern tree strip_fnptr_conv (tree);
/* in name-lookup.c */
extern void maybe_push_cleanup_level (tree);
extern tree make_anon_name (void);
extern tree check_for_out_of_scope_variable (tree);
extern void dump (cp_binding_level &ref);
extern void dump (cp_binding_level *ptr);
extern void print_other_binding_stack (cp_binding_level *);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);
/* decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern void note_break_stmt (void);
extern bool note_iteration_stmt_body_start (void);
extern void note_iteration_stmt_body_end (bool);
extern tree make_lambda_name (void);
extern int decls_match (tree, tree, bool = true);
extern bool maybe_version_functions (tree, tree, bool);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree build_typename_type (tree, tree, tree, tag_types);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree, int);
extern tree build_cp_library_fn_ptr (const char *, tree, int);
extern tree push_library_fn (tree, tree, tree, int);
extern tree push_void_library_fn (tree, tree, int);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (source_location location,
tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern tree lookup_decomp_type (tree);
extern void cp_maybe_mangle_decomp (tree, tree, unsigned int);
extern void cp_finish_decomp (tree, tree, unsigned int);
extern int cp_complete_array_type (tree *, tree, bool);
extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, tree, cp_cv_quals);
extern tree grokparms (tree, tree *);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern bool grok_ctor_properties (const_tree, const_tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern void xref_basetypes (tree, tree);
extern tree start_enum (tree, tree, tree, tree, bool, bool *);
extern void finish_enum_value_list (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree, tree, location_t);
extern tree lookup_enumerator (tree, tree);
extern bool start_preparsed_function (tree, tree, int);
extern bool start_function (cp_decl_specifier_seq *,
const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree outer_curly_brace_block (tree);
extern tree finish_function (bool);
extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern void maybe_register_incomplete_var (tree);
extern void maybe_commonize_var (tree);
extern void complete_vars (tree);
extern tree static_fn_type (tree);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
extern tree check_default_argument (tree, tree, tsubst_flags_t);
extern int wrapup_namespace_globals ();
extern tree create_implicit_typedef (tree, tree);
extern int local_variable_p (const_tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree cxx_builtin_function (tree decl);
extern tree cxx_builtin_function_ext_scope (tree decl);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern bool is_direct_enum_init (tree, tree);
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern tree fndecl_declared_return_type (tree);
extern bool undeduced_auto_decl (tree);
extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
extern bool check_array_designated_initializer (constructor_elt *,
unsigned HOST_WIDE_INT);
extern bool check_for_uninitialized_const_var (tree, bool, tsubst_flags_t);
/* in decl2.c */
extern void record_mangling (tree, bool);
extern void overwrite_mangling (tree, tree);
extern void note_mangling_alias (tree, tree);
extern void generate_mangling_aliases (void);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, tree, tree);
extern bool any_dependent_type_attributes_p (tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern bool attributes_naming_typedef_ok (tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cxx_post_compilation_parsing_cleanups (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void reset_type_linkage (tree);
extern void tentative_decl_linkage (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_defined_p (tree);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
extern bool mark_used (tree);
extern bool mark_used (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree, bool);
extern tree set_guard (tree);
extern tree get_tls_wrapper_fn (tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern void note_variable_template_instantiation (tree);
extern tree build_artificial_parm (tree, tree, tree);
extern bool possibly_inlined_p (tree);
extern int parm_index (tree);
extern tree vtv_start_verification_constructor_init_function (void);
extern tree vtv_finish_verification_constructor_init_function (tree);
extern bool cp_omp_mappable_type (tree);
/* in error.c */
extern const char *type_as_string (tree, int);
extern const char *type_as_string_translate (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *decl_as_string_translate (tree, int);
extern const char *decl_as_dwarf_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int, bool);
extern const char *lang_decl_dwarf_name (tree, int, bool);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void maybe_warn_variadic_templates (void);
extern void maybe_warn_cpp0x (cpp0x_warn_str str);
extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern location_t location_of (tree);
extern void qualified_name_lookup_error (tree, tree, tree,
location_t);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (const_tree);
extern void check_handlers (tree);
extern tree finish_noexcept_expr (tree, tsubst_flags_t);
extern bool expr_noexcept_p (tree, tsubst_flags_t);
extern void perform_deferred_noexcept_checks (void);
extern bool nothrow_spec_p (const_tree);
extern bool type_noexcept_p (const_tree);
extern bool type_throw_all_p (const_tree);
extern tree build_noexcept_spec (tree, int);
extern void choose_personality_routine (enum languages);
extern tree build_must_not_throw_expr (tree, tree);
extern tree eh_type_info (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
extern tree create_try_catch_expr (tree, tree);
/* in expr.c */
extern tree cplus_expand_constant (tree);
extern tree mark_use (tree expr, bool rvalue_p, bool read_p,
location_t = UNKNOWN_LOCATION,
bool reject_builtin = true);
extern tree mark_rvalue_use (tree,
location_t = UNKNOWN_LOCATION,
bool reject_builtin = true);
extern tree mark_lvalue_use (tree);
extern tree mark_lvalue_use_nonread (tree);
extern tree mark_type_use (tree);
extern tree mark_discarded_use (tree);
extern void mark_exp_read (tree);
/* friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree,
enum overload_flags, bool);
extern void set_global_friend (tree);
extern bool is_global_friend (tree);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int,
tsubst_flags_t);
extern int is_class_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree get_nsdmi (tree, bool, tsubst_flags_t);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree throw_bad_array_new_length (void);
extern bool type_has_new_extended_alignment (tree);
extern unsigned malloc_alignment (void);
extern tree build_new (vec<tree, va_gc> **, tree, tree,
vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
tsubst_flags_t);
extern tree build_delete (tree, tree,
special_function_kind,
int, int, tsubst_flags_t);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int,
tsubst_flags_t);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree scalar_constant_value (tree);
extern tree decl_really_constant_value (tree);
extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
extern tree build_vtbl_address (tree);
extern bool maybe_reject_flexarray_init (tree, tree);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree,
location_t = UNKNOWN_LOCATION);
extern tree unqualified_fn_lookup_error (cp_expr);
extern tree make_conv_op_name (tree);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern void fit_decomposition_lang_decl (tree, tree);
extern tree copy_decl (tree CXX_MEM_STAT_INFO);
extern tree copy_type (tree CXX_MEM_STAT_INFO);
extern tree cxx_make_type (enum tree_code);
extern tree make_class_type (enum tree_code);
extern const char *get_identifier_kind_name (tree);
extern void set_identifier_kind (tree, cp_identifier_kind);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
extern tree forward_parm (tree);
extern bool is_trivially_xible (enum tree_code, tree, tree);
extern bool is_xible (enum tree_code, tree, tree);
extern tree get_defaulted_eh_spec (tree, tsubst_flags_t = tf_warning_or_error);
extern void after_nsdmi_defaulted_late_checks (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (const_tree, tree);
extern int num_artificial_parms_for (const_tree);
extern tree make_alias_for (tree, tree);
extern tree get_copy_ctor (tree, tsubst_flags_t);
extern tree get_copy_assign (tree);
extern tree get_default_ctor (tree);
extern tree get_dtor (tree, tsubst_flags_t);
extern tree strip_inheriting_ctors (tree);
extern tree inherited_ctor_binfo (tree);
extern bool ctor_omit_inherited_parms (tree);
extern tree locate_ctor (tree);
extern tree implicitly_declare_fn (special_function_kind, tree,
bool, tree, tree);
/* In optimize.c */
extern bool maybe_clone_body (tree);
/* In parser.c */
extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool,
unsigned short);
extern bool parsing_nsdmi (void);
extern bool parsing_default_capturing_generic_lambda_in_template (void);
extern void inject_this_parameter (tree, cp_cv_quals);
extern location_t defarg_location (tree);
extern void maybe_show_extern_c_location (void);
/* in pt.c */
extern bool check_template_shadow (tree);
extern bool check_auto_in_tmpl_args (tree, tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern void check_unqualified_spec_or_inst (tree, location_t);
extern tree check_explicit_specialization (tree, tree, int, int,
tree = NULL_TREE);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);
extern tree make_auto (void);
extern tree make_decltype_auto (void);
extern tree make_template_placeholder (tree);
extern bool template_placeholder_p (tree);
extern tree do_auto_deduction (tree, tree, tree,
tsubst_flags_t
= tf_warning_or_error,
auto_deduction_context
= adc_unspecified,
tree = NULL_TREE,
int = LOOKUP_NORMAL);
extern tree type_uses_auto (tree);
extern tree type_uses_auto_or_concept (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree,
location_t);
extern tree convert_generic_types_to_packs (tree, int, int);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern tree process_template_parm (tree, location_t, tree,
bool, bool);
extern tree end_template_parm_list (tree);
extern void end_template_parm_list (void);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern tree lookup_template_variable (tree, tree);
extern int uses_template_parms (tree);
extern bool uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern bool need_generic_capture (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
const tree *, unsigned int,
tree, unification_kind_t, int,
bool, bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern bool maybe_instantiate_noexcept (tree, tsubst_flags_t = tf_warning_or_error);
extern tree instantiate_decl (tree, bool, bool);
extern int comp_template_parms (const_tree, const_tree);
extern bool builtin_pack_fn_p (tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree, tsubst_flags_t = tf_warning_or_error);
extern bool check_for_bare_parameter_packs (tree, location_t = UNKNOWN_LOCATION);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree, tree * = NULL,
tree * = NULL, bool = false);
extern int template_args_equal (tree, tree, bool = false);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, int, tree, tree,
tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree tsubst_expr (tree, tree, tsubst_flags_t,
tree, bool);
extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern bool problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation(void);
extern bool instantiating_current_function_p (void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool any_erroneous_template_args_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool type_dependent_object_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool instantiation_dependent_uneval_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree instantiate_non_dependent_expr (tree);
extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_or_null (tree);
extern bool variable_template_specialization_p (tree);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool dependent_alias_template_spec_p (const_tree);
extern bool explicit_class_specialization_p (tree);
extern bool push_tinst_level (tree);
extern bool push_tinst_level_loc (tree, location_t);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level(void);
extern void init_template_processing (void);
extern void print_template_statistics (void);
bool template_template_parameter_p (const_tree);
bool template_type_parameter_p (const_tree);
extern bool primary_template_specialization_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree, tsubst_flags_t);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
extern tree coerce_template_parms (tree, tree, tree);
extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t);
extern void register_local_specialization (tree, tree);
extern tree retrieve_local_specialization (tree);
extern tree extract_fnparm_pack (tree, tree *);
extern tree template_parm_to_arg (tree);
extern tree dguide_name (tree);
extern bool dguide_name_p (tree);
extern bool deduction_guide_p (const_tree);
extern bool copy_guide_p (const_tree);
extern bool template_guide_p (const_tree);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree, tsubst_flags_t);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree, tsubst_flags_t);
extern tree build_headof (tree);
extern tree build_dynamic_cast (tree, tree, tsubst_flags_t);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *, tsubst_flags_t);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern int accessible_in_template_p (tree, tree);
extern tree lookup_field (tree, tree, int, bool);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool,
tsubst_flags_t,
access_failure_info *afi = NULL);
extern tree lookup_member_fuzzy (tree, tree, bool);
extern tree locate_field_accessor (tree, tree, bool);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree binfo_via_virtual (tree, tree);
extern bool binfo_direct_p (tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ());
/* The representation of a deferred access check. */
struct GTY(()) deferred_access_check {
/* The base class in which the declaration is referenced. */
tree binfo;
/* The declaration whose access must be checked. */
tree decl;
/* The declaration that should be used in the error message. */
tree diag_decl;
/* The location of this access. */
location_t loc;
};
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
tsubst_flags_t,
access_failure_info *afi = NULL);
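/* Illustrative sketch only, assuming a hypothetical helper: deferred access
   checks captured while deferring can be saved and replayed once the full
   context is known, using only the declarations above.  */
#if 0
static bool
example_replay_deferred_checks (void)
{
  /* Grab the checks recorded since deferral started.  */
  vec<deferred_access_check, va_gc> *checks = get_deferred_access_checks ();
  /* ... leave the deferring context ... */
  /* Re-run each recorded check, diagnosing failures.  */
  return perform_access_checks (checks, tf_warning_or_error);
}
#endif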
/* RAII sentinel to ensure that deferred access checks are popped before
a function returns. */
struct deferring_access_check_sentinel
{
deferring_access_check_sentinel (enum deferring_kind kind = dk_deferred)
{
push_deferring_access_checks (kind);
}
~deferring_access_check_sentinel ()
{
pop_deferring_access_checks ();
}
};
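/* Illustrative sketch only (hypothetical function): the intended use of the
   sentinel above.  Constructing it calls push_deferring_access_checks; the
   destructor calls pop_deferring_access_checks on every return path, so the
   deferral state cannot leak.  */
#if 0
static tree
example_parse_with_deferred_checks (tree expr)
{
  deferring_access_check_sentinel acs (dk_deferred);
  /* ... parse or substitute EXPR while access checks are deferred ... */
  /* The state pushed above is popped automatically when ACS goes out of
     scope, whichever return is taken.  */
  return expr;
}
#endif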
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern tree finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool, unsigned short);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool, unsigned short);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool, unsigned short);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool is_this_parameter (tree);
enum {
BCS_NORMAL = 0,
BCS_NO_SCOPE = 1,
BCS_TRY_BLOCK = 2,
BCS_FN_BODY = 4,
BCS_TRANSACTION = 8
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
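/* Illustrative sketch only (hypothetical function): the BCS_* values above
   form a bitmask passed to begin_compound_stmt, always paired with
   finish_compound_stmt.  */
#if 0
static void
example_build_empty_block (void)
{
  /* BCS_NORMAL opens an ordinary scoped block; flags may be OR'ed,
     e.g. BCS_TRY_BLOCK for the compound statement of a try-block.  */
  tree compound_stmt = begin_compound_stmt (BCS_NORMAL);
  /* ... add statements via finish_expr_stmt, add_decl_expr, etc. ... */
  finish_compound_stmt (compound_stmt);
}
#endif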
extern tree finish_asm_stmt (int, tree, tree, tree, tree,
tree, bool);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern cp_expr finish_parenthesized_expr (cp_expr);
extern tree force_paren_expr (tree);
extern tree maybe_undo_parenthesized_ref (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *,
tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error);
extern cp_expr finish_increment_expr (cp_expr, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr,
tsubst_flags_t);
/* Whether this call to finish_compound_literal represents a C++11 functional
cast or a C99 compound literal. */
enum fcl_t { fcl_functional, fcl_c99 };
extern tree finish_compound_literal (tree, tree, tsubst_flags_t, fcl_t = fcl_functional);
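/* Illustrative only: the fcl_t argument distinguishes the two source forms
   that reach finish_compound_literal, e.g. (assuming a class type T):

     T{1, 2}       C++11 functional cast            -> fcl_functional
     (T){1, 2}     C99-style compound literal (GNU) -> fcl_c99  */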
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern bool outer_automatic_var_p (tree);
extern tree process_outer_var_ref (tree, tsubst_flags_t, bool force_use = false);
extern cp_expr finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **,
location_t);
extern tree finish_typeof (tree);
extern tree finish_underlying_type (tree);
extern tree calculate_bases (tree, tsubst_flags_t);
extern tree finish_bases (tree, bool);
extern tree calculate_direct_bases (tree, tsubst_flags_t);
extern tree finish_offsetof (tree, tree, location_t);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void emit_associated_thunks (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern bool expand_or_defer_fn_1 (tree);
extern void expand_or_defer_fn (tree);
extern void add_typedef_to_current_template_for_access_check (tree, tree,
location_t);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool, tsubst_flags_t);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern tree omp_reduction_id (enum tree_code, tree, tree);
extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *);
extern void cp_check_omp_declare_reduction (tree);
extern void finish_omp_declare_simd_methods (tree);
extern tree finish_omp_clauses (tree, enum c_omp_region_type);
extern tree push_omp_privatization_clauses (bool);
extern void pop_omp_privatization_clauses (tree);
extern void save_omp_privatization_clauses (vec<tree> &);
extern void restore_omp_privatization_clauses (vec<tree> &);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree finish_oacc_data (tree, tree);
extern tree finish_oacc_host_data (tree, tree);
extern tree finish_omp_construct (enum tree_code, tree, tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree begin_omp_task (void);
extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, enum tree_code,
tree, tree, tree, tree, tree,
tree, tree, vec<tree> *, tree);
extern void finish_omp_atomic (enum tree_code, enum tree_code,
tree, tree, tree, tree, tree,
bool);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
extern void finish_omp_taskyield (void);
extern void finish_omp_cancel (tree);
extern void finish_omp_cancellation_point (tree);
extern tree omp_privatize_field (tree, bool);
extern tree begin_transaction_stmt (location_t, tree *, int);
extern void finish_transaction_stmt (tree, tree, int, tree);
extern tree build_transaction_expr (location_t, tree, int, tree);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree finish_trait_expr (enum cp_trait_kind, tree, tree);
extern tree build_lambda_expr (void);
extern tree build_lambda_object (tree);
extern tree begin_lambda_type (tree);
extern tree lambda_capture_field_type (tree, bool, bool);
extern tree lambda_return_type (tree);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
extern tree add_capture (tree, tree, tree, bool, bool);
extern tree add_default_capture (tree, tree, tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
extern bool is_capture_proxy (tree);
extern bool is_normal_capture_proxy (tree);
extern bool is_constant_capture_proxy (tree);
extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree, bool);
extern void maybe_generic_this_capture (tree, tree);
extern tree maybe_resolve_dummy (tree, bool);
extern tree current_nonlambda_function (void);
extern tree nonlambda_method_basetype (void);
extern tree current_nonlambda_scope (void);
extern tree current_lambda_expr (void);
extern bool generic_lambda_fn_p (tree);
extern tree do_dependent_capture (tree, bool = false);
extern bool lambda_fn_in_template_p (tree);
extern void maybe_add_lambda_conv_op (tree);
extern bool is_lambda_ignored_entity (tree);
extern bool lambda_static_thunk_p (tree);
extern tree finish_builtin_launder (location_t, tree,
tsubst_flags_t);
extern void start_lambda_scope (tree);
extern void record_lambda_scope (tree);
extern void record_null_lambda_scope (tree);
extern void finish_lambda_scope (void);
extern tree start_lambda_function (tree fn, tree lambda_expr);
extern void finish_lambda_function (tree body);
/* in tree.c */
extern int cp_tree_operand_length (const_tree);
extern int cp_tree_code_length (enum tree_code);
extern void cp_free_lang_data (tree t);
extern tree force_target_expr (tree, tree, tsubst_flags_t);
extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN
ATTRIBUTE_COLD;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern void init_tree (void);
extern bool pod_type_p (const_tree);
extern bool layout_pod_type_p (const_tree);
extern bool std_layout_type_p (const_tree);
extern bool trivial_type_p (const_tree);
extern bool trivially_copyable_p (const_tree);
extern bool type_has_unique_obj_representations (const_tree);
extern bool scalarish_type_p (const_tree);
extern bool type_has_nontrivial_default_init (const_tree);
extern bool type_has_nontrivial_copy_init (const_tree);
extern void maybe_warn_parm_abi (tree, location_t);
extern bool class_tmpl_impl_spec_p (const_tree);
extern int zero_init_p (const_tree);
extern bool check_abi_tag_redeclaration (const_tree, const_tree,
const_tree);
extern bool check_abi_tag_args (tree, tree);
extern tree strip_typedefs (tree, bool * = NULL);
extern tree strip_typedefs_expr (tree, bool * = NULL);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (const_tree);
extern cp_lvalue_kind real_lvalue_p (const_tree);
extern cp_lvalue_kind lvalue_kind (const_tree);
extern bool glvalue_p (const_tree);
extern bool obvalue_p (const_tree);
extern bool xvalue_p (const_tree);
extern bool bitfield_p (const_tree);
extern tree cp_stabilize_reference (tree);
extern bool builtin_valid_in_constant_expr_p (const_tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...);
extern tree build_min_nt_call_vec (tree, vec<tree, va_gc> *);
extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern vec<tree, va_gc>* vec_copy_and_insert (vec<tree, va_gc>*, tree, unsigned);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree);
extern tree get_target_expr (tree);
extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
extern tree build_cplus_array_type (tree, tree);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
extern bool vla_type_p (tree);
extern tree build_array_copy (tree);
extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
extern void diagnose_non_constexpr_vec_init (tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
inline tree ovl_first (tree) ATTRIBUTE_PURE;
extern tree ovl_make (tree fn,
tree next = NULL_TREE);
extern tree ovl_insert (tree fn, tree maybe_ovl,
bool using_p = false);
extern tree ovl_skip_hidden (tree) ATTRIBUTE_PURE;
extern void lookup_mark (tree lookup, bool val);
extern tree lookup_add (tree fns, tree lookup);
extern tree lookup_maybe_add (tree fns, tree lookup,
bool deduping);
extern void lookup_keep (tree lookup, bool keep);
extern void lookup_list_keep (tree list, bool keep);
extern int is_overloaded_fn (tree) ATTRIBUTE_PURE;
extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE;
extern tree dependent_name (tree);
extern tree get_fns (tree) ATTRIBUTE_PURE;
extern tree get_first_fn (tree) ATTRIBUTE_PURE;
extern tree ovl_scope (tree);
extern const char *cxx_printable_name (tree, int);
extern const char *cxx_printable_name_translate (tree, int);
extern tree canonical_eh_spec (tree);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree, bool = false);
extern tree build_ctor_subob_ref (tree, tree, tree);
extern tree replace_placeholders (tree, tree, bool * = NULL);
extern bool find_placeholders (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (const_tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
extern tree move (tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
#define cp_build_qualified_type(TYPE, QUALS) \
cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
extern bool cv_qualified_p (const_tree);
extern tree cv_unqualified (tree);
extern special_function_kind special_function_p (const_tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern duration_kind decl_storage_duration (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, hash_set<tree> *);
#define cp_walk_tree(tp,func,data,pset) \
walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
#define cp_walk_tree_without_duplicates(tp,func,data) \
walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
extern bool cxx_type_hash_eq (const_tree, const_tree);
extern tree cxx_copy_lang_qualifiers (const_tree, const_tree);
extern void cxx_print_statistics (void);
extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
extern void cp_warn_deprecated_use (tree);
/* in ptree.c */
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (diagnostic_context *,
const char *,
struct diagnostic_info *);
/* in typeck.c */
extern bool cxx_mark_addressable (tree, bool = false);
extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
inline bool type_unknown_p (const_tree);
enum { ce_derived, ce_type, ce_normal, ce_exact };
extern bool comp_except_specs (const_tree, const_tree, int);
extern bool comptypes (tree, tree, int);
extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
extern bool compparms (const_tree, const_tree);
extern int comp_cv_qualification (const_tree, const_tree);
extern int comp_cv_qualification (int, int);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool, bool);
extern tree cxx_alignas_expr (tree);
extern tree cxx_sizeof_nowarn (tree);
extern tree is_bitfield_expr_with_lowered_type (const_tree);
extern tree unlowered_expr_type (const_tree);
extern tree decay_conversion (tree,
tsubst_flags_t,
bool = true);
extern tree build_class_member_access_expr (cp_expr, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (cp_expr, tree, bool,
tsubst_flags_t);
extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree cp_build_fold_indirect_ref (tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_x_unary_op (location_t,
enum tree_code, cp_expr,
tsubst_flags_t);
extern tree cp_build_addressof (location_t, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_unary_op (enum tree_code, tree, bool,
tsubst_flags_t);
extern tree genericize_compound_lvalue (tree);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
extern tree build_static_cast (tree, tree, tsubst_flags_t);
extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t);
extern tree build_const_cast (tree, tree, tsubst_flags_t);
extern tree build_c_cast (location_t, tree, tree);
extern cp_expr build_c_cast (location_t loc, tree type,
cp_expr expr);
extern tree cp_build_c_cast (tree, tree, tsubst_flags_t);
extern cp_expr build_x_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree convert_for_initialization (tree, tree, tree, int,
impl_conv_rhs, tree, int,
tsubst_flags_t);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern bool error_type_p (const_tree);
extern bool ptr_reasonably_similar (const_tree, const_tree);
extern tree build_ptrmemfunc (tree, tree, int, bool,
tsubst_flags_t);
extern int cp_type_quals (const_tree);
extern int type_memfn_quals (const_tree);
extern cp_ref_qualifier type_memfn_rqual (const_tree);
extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier);
extern bool cp_has_mutable_p (const_tree);
extern bool at_least_as_qualified_p (const_tree, const_tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree common_pointer_type (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
composite_pointer_operation,
tsubst_flags_t);
extern tree merge_types (tree, tree);
extern tree strip_array_domain (tree);
extern tree check_return_expr (tree, bool *);
extern tree cp_build_binary_op (location_t,
enum tree_code, tree, tree,
tsubst_flags_t);
extern tree build_x_vec_perm_expr (location_t,
tree, tree, tree,
tsubst_flags_t);
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, false, true)
extern tree build_simple_component_ref (tree, tree);
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (location_t, tree,
tsubst_flags_t);
extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
extern tree convert_ptrmem (tree, tree, bool, bool,
tsubst_flags_t);
extern int lvalue_or_else (tree, enum lvalue_use,
tsubst_flags_t);
extern void check_template_keyword (tree);
extern bool check_raw_literal_operator (const_tree decl);
extern bool check_literal_operator_args (const_tree, bool *, bool *);
extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
extern tree finish_left_unary_fold_expr (tree, int);
extern tree finish_right_unary_fold_expr (tree, int);
extern tree finish_binary_fold_expr (tree, tree, int);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (location_t, const_tree,
const_tree, diagnostic_t);
inline void
cxx_incomplete_type_diagnostic (const_tree value, const_tree type,
diagnostic_t diag_kind)
{
cxx_incomplete_type_diagnostic (EXPR_LOC_OR_LOC (value, input_location),
value, type, diag_kind);
}
extern void cxx_incomplete_type_error (location_t, const_tree,
const_tree);
inline void
cxx_incomplete_type_error (const_tree value, const_tree type)
{
cxx_incomplete_type_diagnostic (value, type, DK_ERROR);
}
extern void cxx_incomplete_type_inform (const_tree);
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (tree, enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error (abstract_class_use, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern tree split_nonconstant_init (tree, tree);
extern bool check_narrowing (tree, tree, tsubst_flags_t);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int, tsubst_flags_t);
extern tree digest_nsdmi_init (tree, tree, tsubst_flags_t);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (location_t, tree,
tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree);
/* in mangle.c */
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree, tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_tls_init_fn (tree);
extern tree mangle_tls_wrapper_fn (tree);
extern bool decl_tls_wrapper_p (tree);
extern tree mangle_ref_init_variable (tree);
extern char * get_mangled_vtable_map_var_name (tree);
extern bool mangle_return_type_p (tree);
extern tree mangle_decomp (tree, vec<tree> &);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern alias_set_type cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (const_tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern bool cxx_block_may_fallthru (const_tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, gimple_seq *,
gimple_seq *);
extern void cp_genericize (tree);
extern bool cxx_omp_const_qual_no_mutable (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing_1 (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern void cxx_omp_finish_clause (tree, gimple_seq *);
extern bool cxx_omp_privatize_by_reference (const_tree);
extern bool cxx_omp_disregard_value_expr (tree, bool);
extern void cp_fold_function (tree);
extern tree cp_fully_fold (tree);
extern void clear_fold_cache (void);
/* in name-lookup.c */
extern void suggest_alternatives_for (location_t, tree, bool);
extern bool suggest_alternative_in_explicit_scope (location_t, tree, tree);
extern tree strip_using_decl (tree);
/* Tell the binding oracle what kind of binding we are looking for. */
enum cp_oracle_request
{
CP_ORACLE_IDENTIFIER
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier);
extern cp_binding_oracle_function *cp_binding_oracle;
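/* Illustrative sketch only: a hypothetical no-op oracle matching the typedef
   above.  A real client (for example a debugger embedding the front end)
   would look IDENTIFIER up in its own tables and make a binding visible via
   pushdecl or the like, exactly as the comment above describes.  */
#if 0
static void
example_binding_oracle (enum cp_oracle_request kind, tree identifier)
{
  if (kind != CP_ORACLE_IDENTIFIER)
    return;
  /* Look IDENTIFIER up externally; if nothing is found, simply return,
     and the oracle will not be asked about this binding again.  */
}

/* Installed once at start-up:  cp_binding_oracle = example_binding_oracle;  */
#endif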
/* in constraint.cc */
extern void init_constraint_processing ();
extern bool constraint_p (tree);
extern tree conjoin_constraints (tree, tree);
extern tree conjoin_constraints (tree);
extern tree get_constraints (tree);
extern void set_constraints (tree, tree);
extern void remove_constraints (tree);
extern tree current_template_constraints (void);
extern tree associate_classtype_constraints (tree);
extern tree build_constraints (tree, tree);
extern tree get_shorthand_constraints (tree);
extern tree build_concept_check (tree, tree, tree = NULL_TREE);
extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE);
extern tree make_constrained_auto (tree, tree);
extern void placeholder_extract_concept_and_args (tree, tree&, tree&);
extern bool equivalent_placeholder_constraints (tree, tree);
extern hashval_t hash_placeholder_constraint (tree);
extern bool deduce_constrained_parameter (tree, tree&, tree&);
extern tree resolve_constraint_check (tree);
extern tree check_function_concept (tree);
extern tree finish_template_introduction (tree, tree);
extern bool valid_requirements_p (tree);
extern tree finish_concept_name (tree);
extern tree finish_shorthand_constraint (tree, tree);
extern tree finish_requires_expr (tree, tree);
extern tree finish_simple_requirement (tree);
extern tree finish_type_requirement (tree);
extern tree finish_compound_requirement (tree, tree, bool);
extern tree finish_nested_requirement (tree);
extern void check_constrained_friend (tree, tree);
extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree);
extern bool function_concept_check_p (tree);
extern tree normalize_expression (tree);
extern tree expand_concept (tree, tree);
extern bool expanding_concept ();
extern tree evaluate_constraints (tree, tree);
extern tree evaluate_function_concept (tree, tree);
extern tree evaluate_variable_concept (tree, tree);
extern tree evaluate_constraint_expression (tree, tree);
extern bool constraints_satisfied_p (tree);
extern bool constraints_satisfied_p (tree, tree);
extern tree lookup_constraint_satisfaction (tree, tree);
extern tree memoize_constraint_satisfaction (tree, tree, tree);
extern tree lookup_concept_satisfaction (tree, tree);
extern tree memoize_concept_satisfaction (tree, tree, tree);
extern tree get_concept_expansion (tree, tree);
extern tree save_concept_expansion (tree, tree, tree);
extern bool* lookup_subsumption_result (tree, tree);
extern bool save_subsumption_result (tree, tree, bool);
extern bool equivalent_constraints (tree, tree);
extern bool equivalently_constrained (tree, tree);
extern bool subsumes_constraints (tree, tree);
extern bool strictly_subsumes (tree, tree);
extern int more_constrained (tree, tree);
extern void diagnose_constraints (location_t, tree, tree);
/* in logic.cc */
extern tree decompose_conclusions (tree);
extern bool subsumes (tree, tree);
/* In class.c */
extern void cp_finish_injected_record_type (tree);
/* in vtable-class-hierarchy.c */
extern void vtv_compute_class_hierarchy_transitive_closure (void);
extern void vtv_generate_init_routine (void);
extern void vtv_save_class_info (tree);
extern void vtv_recover_class_info (void);
extern void vtv_build_vtable_verify_fndecl (void);
/* In constexpr.c */
extern void fini_constexpr (void);
extern bool literal_type_p (tree);
extern tree register_constexpr_fundef (tree, tree);
extern bool is_valid_constexpr_fn (tree, bool);
extern bool check_constexpr_ctor_body (tree, tree, bool);
extern tree constexpr_fn_retval (tree);
extern tree ensure_literal_type_for_constexpr_object (tree);
extern bool potential_constant_expression (tree);
extern bool is_constant_expression (tree);
extern bool is_nondependent_constant_expression (tree);
extern bool is_nondependent_static_init_expression (tree);
extern bool is_static_init_expression (tree);
extern bool potential_rvalue_constant_expression (tree);
extern bool require_potential_constant_expression (tree);
extern bool require_constant_expression (tree);
extern bool require_rvalue_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
extern tree cxx_constant_value (tree, tree = NULL_TREE);
extern tree cxx_constant_init (tree, tree = NULL_TREE);
extern tree maybe_constant_value (tree, tree = NULL_TREE);
extern tree maybe_constant_init (tree, tree = NULL_TREE);
extern tree fold_non_dependent_expr (tree, tsubst_flags_t = tf_none);
extern tree fold_simple (tree);
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern bool is_instantiation_of_constexpr (tree);
extern bool var_in_constexpr_fn (tree);
extern bool var_in_maybe_constexpr_fn (tree);
extern void explain_invalid_constexpr_fn (tree);
extern vec<tree> cx_error_context (void);
extern tree fold_sizeof_expr (tree);
extern void clear_cv_and_fold_caches (void);
/* In cp-ubsan.c */
extern void cp_ubsan_maybe_instrument_member_call (tree);
extern void cp_ubsan_instrument_member_accesses (tree *);
extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree);
extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree);
extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree);
/* Inline bodies. */
inline tree
ovl_first (tree node)
{
while (TREE_CODE (node) == OVERLOAD)
node = OVL_FUNCTION (node);
return node;
}
inline bool
type_unknown_p (const_tree expr)
{
return TREE_TYPE (expr) == unknown_type_node;
}
inline hashval_t
named_decl_hash::hash (const value_type decl)
{
tree name = OVL_NAME (decl);
return name ? IDENTIFIER_HASH_VALUE (name) : 0;
}
inline bool
named_decl_hash::equal (const value_type existing, compare_type candidate)
{
tree name = OVL_NAME (existing);
return candidate == name;
}
inline bool
null_node_p (const_tree expr)
{
STRIP_ANY_LOCATION_WRAPPER (expr);
return expr == null_node;
}
#if CHECKING_P
namespace selftest {
extern void run_cp_tests (void);
/* Declarations for specific families of tests within cp,
by source file, in alphabetical order. */
extern void cp_pt_c_tests ();
extern void cp_tree_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
|
GB_unop__one_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__one_fp64_fp64)
// op(A') function: GB (_unop_tran__one_fp64_fp64)
// C type: double
// A type: double
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = 1 ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__one_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = 1 ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = 1 ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__one_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
broadcast_reduce-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
* \file broadcast_reduce-inl.h
* \brief Function definitions of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
const int MAX_DIM = 5;
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
template<int ndim>
MSHADOW_XINLINE void unravel_dot(const int idx, const Shape<ndim>& shape,
const Shape<ndim>& stridej, const Shape<ndim>& stridek, int* j, int* k) {
*j = 0;
*k = 0;
#pragma unroll
for (int i = ndim-1, idx_t = idx; i >=0; --i) {
const int tmp = idx_t / shape[i];
const int coord = idx_t - tmp*shape[i];
*j += coord*stridej[i];
*k += coord*stridek[i];
idx_t = tmp;
}
}
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > 1) * coord[i];
}
return ret;
}
template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims,
Shape<ndim>* stride) {
int mdim = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
mdim += small[i] != big[i];
(*dims)[i] = (*stride)[i] = 1;
}
#pragma unroll
for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) {
if (small[i] != big[i]) {
--j;
(*stride)[j] = s;
(*dims)[j] = big[i];
}
s *= big[i];
}
return mdim;
}
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i)
ret += coord[i] * stride[i];
return ret;
}
template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
if (addto) {
*dst += src;
} else {
*dst = src;
}
}
template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const int idx, const bool addto,
const DType* __restrict lhs,
const DType* __restrict rhs, DType* out,
const Shape<ndim>& lshape, const Shape<ndim>& rshape,
const Shape<ndim>& oshape) {
const Shape<ndim> coord = unravel(idx, oshape);
const int j = ravel(coord, lshape);
const int k = ravel(coord, rshape);
assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}
template<typename Reducer, int ndim, typename DType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
const DType* __restrict big, DType *small,
const Shape<ndim>& bshape, const Shape<ndim>& sshape,
const Shape<ndim>& rshape, const Shape<ndim>& rstride) {
Shape<ndim> coord = unravel(idx, sshape);
int j = ravel(coord, bshape);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (int k = 0; k < M; ++k) {
coord = unravel(k, rshape);
Reducer::Reduce(val, OP::Map(big[j + dot(coord, rstride)]), residual);
}
Reducer::Finalize(val, residual);
assign(&small[idx], addto, val);
}
#ifdef __CUDACC__
#include "broadcast_reduce-inl.cuh"
#else
template<int ndim, typename DType, typename OP>
void binary_broadcast_compute(const int N, const bool addto, const DType *lhs,
const DType *rhs, DType *out, const Shape<ndim> lshape,
const Shape<ndim> rshape, const Shape<ndim> oshape) {
for (int idx = 0; idx < N; ++idx) {
binary_broadcast_assign<ndim, DType, OP>(idx, addto, lhs, rhs, out, lshape, rshape, oshape);
}
}
template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
if (req == kNullOp) return;
int N = out.shape_.Size();
binary_broadcast_compute<ndim, DType, OP>(N, req == kAddTo, lhs.dptr<DType>(), rhs.dptr<DType>(),
out.dptr<DType>(), lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(),
out.shape_.get<ndim>());
}
template<typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute(const int N, const int M, const bool addto,
const DType *big, DType *small, const Shape<ndim> bshape,
const Shape<ndim> sshape, const Shape<ndim> rshape,
const Shape<ndim> rstride) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int idx = 0; idx < N; ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP>(idx, M, addto, big, small, bshape, sshape, rshape,
rstride);
}
}
template<typename Reducer, int ndim, typename DType, typename OP>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
if (req == kNullOp) return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
int N = small.shape_.Size(), M = rshape.Size();
seq_reduce_compute<Reducer, ndim, DType, OP>(
N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(),
small.shape_.get<ndim>(), rshape, rstride);
}
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
const TShape& big) {
return 0;
}
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TShape& small, const OpReqType req,
const TShape& big, const TShape& lhs, const TShape& rhs) {
return 0;
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
const DType* __restrict big, const DType* __restrict lhs,
const DType* __restrict rhs, DType *small,
const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0,
const Shape<ndim>& rhs_shape0,
const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape,
const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
const Shape<ndim>& rhs_stride) {
Shape<ndim> coord = unravel(idx, small_shape);
const int idx_big0 = ravel(coord, big_shape);
const int idx_lhs0 = ravel(coord, lhs_shape0);
const int idx_rhs0 = ravel(coord, rhs_shape0);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (int k = 0; k < M; ++k) {
Shape<ndim> coord_big = unravel(k, rshape);
int idx_big = idx_big0 + dot(coord_big, rstride);
Shape<ndim> coord_lhs = unravel(k, lhs_shape);
int idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
Shape<ndim> coord_rhs = unravel(k, rhs_shape);
int idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
}
Reducer::Finalize(val, residual);
assign(&small[idx], addto, val);
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const int N, const int M, const bool addto,
const DType *big, const DType *lhs, const DType *rhs, DType *small,
const Shape<ndim> big_shape, const Shape<ndim> small_shape,
const Shape<ndim> rshape, const Shape<ndim> rstride,
const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int idx = 0; idx < N; ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
lhs_stride, rhs_stride);
}
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
const TBlob& rhs) {
if (req == kNullOp) return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
int N = small.shape_.Size();
int M = rshape.Size();
Shape<ndim> lhs_shape, lhs_stride;
diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
Shape<ndim> rhs_shape, rhs_stride;
diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
N, M, req == kAddTo,
big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(),
rshape, rstride,
lhs_shape, lhs_stride,
rhs_shape, rhs_stride,
lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/animate.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken an image,
% to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: the operator to apply to each selected channel.
%
% o value: the value used by the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
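/*
  Illustrative usage sketch (added for clarity; not part of the original
  source).  "image" and "exception" are assumed to be a valid Image* and
  ExceptionInfo*:

    // Halve the red channel only.
    MagickBooleanType ok=EvaluateImageChannel(image,RedChannel,
      MultiplyEvaluateOperator,0.5,exception);
    // Brighten all channels by 10% of the quantum range.
    ok=EvaluateImage(image,AddEvaluateOperator,0.10*QuantumRange,exception);
*/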
static MagickPixelPacket **DestroyPixelThreadSet(const Image *images,
MagickPixelPacket **pixels)
{
register ssize_t
i;
size_t
rows;
assert(pixels != (MagickPixelPacket **) NULL);
rows=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
for (i=0; i < (ssize_t) rows; i++)
if (pixels[i] != (MagickPixelPacket *) NULL)
pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static MagickPixelPacket **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
MagickPixelPacket
**pixels;
register ssize_t
i,
j;
size_t
columns,
rows;
rows=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(MagickPixelPacket **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (MagickPixelPacket **) NULL)
return((MagickPixelPacket **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=GetImageListLength(images);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(columns,
sizeof(**pixels));
if (pixels[i] == (MagickPixelPacket *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
GetMagickPixelPacket(images,&pixels[i][j]);
}
return(pixels);
}
static inline double EvaluateMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const MagickPixelPacket
*color_1,
*color_2;
int
intensity;
color_1=(const MagickPixelPacket *) x;
color_2=(const MagickPixelPacket *) y;
intensity=(int) MagickPixelIntensity(color_2)-(int)
MagickPixelIntensity(color_1);
return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
const Quantum pixel,const MagickEvaluateOperator op,
const MagickRealType value)
{
MagickRealType
result;
register ssize_t
i;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(MagickRealType) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a
positive result. It differs from % or fmod() which returns a
'truncated modulus' result, where floor() is replaced by trunc()
and could return a negative result (which is clipped).
*/
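/*
  Worked example (added for clarity; not part of the original source):
  with QuantumRange 65535, a pixel of 60000 and a value of 10000 sum to
  70000, and 70000-65536*floor(70000/65536) = 4464, so the result wraps
  back into [0, QuantumRange] instead of being clipped at QuantumRange.
*/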
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
result=(MagickRealType) ((ssize_t) pixel & (ssize_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
ImpulseNoise,value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(double) pixel;
for (i=0; i < (ssize_t) value; i++)
result*=2.0;
break;
}
case LogEvaluateOperator:
{
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
pixel+1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(MagickRealType) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(MagickRealType) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(MagickRealType) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(MagickRealType) ((ssize_t) pixel | (ssize_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
PoissonNoise,value);
break;
}
case PowEvaluateOperator:
{
if (pixel < 0)
result=(MagickRealType) -(QuantumRange*pow((double) -(QuantumScale*
pixel),(double) value));
else
result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
(double) value));
break;
}
case RightShiftEvaluateOperator:
{
result=(MagickRealType) pixel;
for (i=0; i < (ssize_t) value; i++)
result/=2.0;
break;
}
case RootMeanSquareEvaluateOperator:
{
result=(MagickRealType) (pixel*pixel+value);
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(MagickRealType) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
UniformNoise,value);
break;
}
case XorEvaluateOperator:
{
result=(MagickRealType) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
break;
}
}
return(result);
}
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
const Image
*p,
*q;
size_t
columns,
number_channels,
rows;
q=images;
columns=images->columns;
rows=images->rows;
number_channels=0;
for (p=images; p != (Image *) NULL; p=p->next)
{
size_t
channels;
channels=3;
if (p->matte != MagickFalse)
channels+=1;
if (p->colorspace == CMYKColorspace)
channels+=1;
if (channels > number_channels)
{
number_channels=channels;
q=p;
}
if (p->columns > columns)
columns=p->columns;
if (p->rows > rows)
rows=p->rows;
}
return(CloneImage(q,columns,rows,MagickTrue,exception));
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=EvaluateImageChannel(image,CompositeChannels,op,value,exception);
return(status);
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**magick_restrict evaluate_pixels,
zero;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
evaluate_pixels=AcquirePixelThreadSet(images);
if (evaluate_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
number_images=GetImageListLength(images);
GetMagickPixelPacket(images,&zero);
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict evaluate_indexes;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_images; i++)
evaluate_pixel[i]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),op,evaluate_pixel[i].red);
evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),op,evaluate_pixel[i].green);
evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),op,evaluate_pixel[i].blue);
evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelAlpha(p),op,evaluate_pixel[i].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
*indexes,op,evaluate_pixel[i].index);
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[i/2].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict evaluate_indexes;
register ssize_t
i,
x;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
evaluate_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].red);
evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].green);
evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].blue);
evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].index);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (op == MeanEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red/=number_images;
evaluate_pixel[x].green/=number_images;
evaluate_pixel[x].blue/=number_images;
evaluate_pixel[x].opacity/=number_images;
evaluate_pixel[x].index/=number_images;
}
if (op == RootMeanSquareEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red=sqrt((double) evaluate_pixel[x].red/
number_images);
evaluate_pixel[x].green=sqrt((double) evaluate_pixel[x].green/
number_images);
evaluate_pixel[x].blue=sqrt((double) evaluate_pixel[x].blue/
number_images);
evaluate_pixel[x].opacity=sqrt((double) evaluate_pixel[x].opacity/
number_images);
evaluate_pixel[x].index=sqrt((double) evaluate_pixel[x].index/
number_images);
}
if (op == MultiplyEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
{
evaluate_pixel[x].red*=(MagickRealType) QuantumScale;
evaluate_pixel[x].green*=(MagickRealType) QuantumScale;
evaluate_pixel[x].blue*=(MagickRealType) QuantumScale;
evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale;
evaluate_pixel[x].index*=(MagickRealType) QuantumScale;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
const ChannelType channel,const MagickEvaluateOperator op,const double value,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
result;
if ((channel & RedChannel) != 0)
{
result=ApplyEvaluateOperator(random_info[id],GetPixelRed(q),op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelRed(q,ClampToQuantum(result));
}
if ((channel & GreenChannel) != 0)
{
result=ApplyEvaluateOperator(random_info[id],GetPixelGreen(q),op,
value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelGreen(q,ClampToQuantum(result));
}
if ((channel & BlueChannel) != 0)
{
result=ApplyEvaluateOperator(random_info[id],GetPixelBlue(q),op,
value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelBlue(q,ClampToQuantum(result));
}
if ((channel & OpacityChannel) != 0)
{
if (image->matte == MagickFalse)
{
result=ApplyEvaluateOperator(random_info[id],GetPixelOpacity(q),
op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelOpacity(q,ClampToQuantum(result));
}
else
{
result=ApplyEvaluateOperator(random_info[id],GetPixelAlpha(q),
op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelAlpha(q,ClampToQuantum(result));
}
}
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
{
result=ApplyEvaluateOperator(random_info[id],GetPixelIndex(indexes+x),
op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelIndex(indexes+x,ClampToQuantum(result));
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() evaluates a function (polynomial, sinusoid, arcsin, or
% arctan) over each selected channel of the image, using the supplied
% parameters. Use these operations to lighten or darken an image, to
% increase or decrease contrast, or to otherwise reshape the channel values.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
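/*
  Illustrative usage sketch (added for clarity; not part of the original
  source).  "image" and "exception" are assumed to be valid:

    // Apply the polynomial 2*x^2 - x + 0.5 (coefficients are listed from
    // highest to lowest order) to all composite channels.
    const double coefficients[3] = { 2.0, -1.0, 0.5 };
    MagickBooleanType ok=FunctionImageChannel(image,CompositeChannels,
      PolynomialFunction,3,coefficients,exception);
*/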
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
MagickRealType
result;
register ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
* Polynomial
* Parameters: polynomial constants, highest to lowest order
* For example: c0*x^3 + c1*x^2 + c2*x + c3
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel + parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
/* Sinusoid Function
* Parameters: Freq, Phase, Ampl, bias
*/
double freq,phase,ampl,bias;
freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0;
ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI*
(freq*QuantumScale*pixel + phase/360.0) )) + bias ) );
break;
}
case ArcsinFunction:
{
/* Arcsin Function (pegged at range limits for invalid results)
* Parameters: Width, Center, Range, Bias
*/
double width,range,center,bias;
width = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result = 2.0/width*(QuantumScale*pixel - center);
if ( result <= -1.0 )
result = bias - range/2.0;
else if ( result >= 1.0 )
result = bias + range/2.0;
else
result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
result *= QuantumRange;
break;
}
case ArctanFunction:
{
/* Arctan Function
* Parameters: Slope, Center, Range, Bias
*/
double slope,range,center,bias;
slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
result) + bias ) );
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
const MagickFunction function,const size_t number_parameters,
const double *parameters,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=FunctionImageChannel(image,CompositeChannels,function,
number_parameters,parameters,exception);
return(status);
}
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
const ChannelType channel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
status=AccelerateFunctionImage(image,channel,function,number_parameters,
parameters,exception);
if (status != MagickFalse)
return(status);
#endif
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
number_parameters,parameters,exception));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
number_parameters,parameters,exception));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
number_parameters,parameters,exception));
if ((channel & OpacityChannel) != 0)
{
if (image->matte == MagickFalse)
SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
number_parameters,parameters,exception));
else
SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
number_parameters,parameters,exception));
}
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
number_parameters,parameters,exception));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageChannelEntropy method is:
%
% MagickBooleanType GetImageChannelEntropy(const Image *image,
% const ChannelType channel,double *entropy,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
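/*
  Illustrative usage sketch (added for clarity; not part of the original
  source):

    double entropy=0.0;
    if (GetImageChannelEntropy(image,RedChannel,&entropy,exception) != MagickFalse)
      (void) fprintf(stdout,"red-channel entropy: %g\n",entropy);
*/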
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
double *entropy,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelEntropy(image,CompositeChannels,entropy,exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelEntropy(const Image *image,
const ChannelType channel,double *entropy,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
size_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageChannelStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
channels=0;
channel_statistics[CompositeChannels].entropy=0.0;
if ((channel & RedChannel) != 0)
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[RedChannel].entropy;
channels++;
}
if ((channel & GreenChannel) != 0)
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[GreenChannel].entropy;
channels++;
}
if ((channel & BlueChannel) != 0)
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[BlueChannel].entropy;
channels++;
}
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[OpacityChannel].entropy;
channels++;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[BlackChannel].entropy;
channels++;
}
channel_statistics[CompositeChannels].entropy/=channels;
*entropy=channel_statistics[CompositeChannels].entropy;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
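/*
  Illustrative usage sketch (added for clarity; not part of the original
  source):

    size_t minima=0, maxima=0;
    if (GetImageChannelExtrema(image,RedChannel,&minima,&maxima,
          exception) != MagickFalse)
      (void) fprintf(stdout,"red range: [%.20g, %.20g]\n",(double) minima,
        (double) maxima);
*/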
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
const ChannelType channel,size_t *minima,size_t *maxima,
ExceptionInfo *exception)
{
double
max,
min;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageChannelRange(image,channel,&min,&max,exception);
*minima=(size_t) ceil(min-0.5);
*maxima=(size_t) floor(max+0.5);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
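/*
  Added note (for clarity; not part of the original source): the code below
  accumulates the raw moments E[x], E[x^2], E[x^3] and E[x^4] over the
  selected channels and then computes

    sigma = sqrt(E[x^2] - mu^2)
    skewness = (E[x^3] - 3*mu*E[x^2] + 2*mu^3) / sigma^3
    kurtosis = (E[x^4] - 4*mu*E[x^3] + 6*mu^2*E[x^2] - 3*mu^4) / sigma^4 - 3

  i.e. the excess kurtosis, so a Gaussian channel reports approximately 0.
*/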
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
double *kurtosis,double *skewness,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
const ChannelType channel,double *kurtosis,double *skewness,
ExceptionInfo *exception)
{
double
area,
mean,
standard_deviation,
sum_squares,
sum_cubes,
sum_fourth_power;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*kurtosis=0.0;
*skewness=0.0;
area=0.0;
mean=0.0;
standard_deviation=0.0;
sum_squares=0.0;
sum_cubes=0.0;
sum_fourth_power=0.0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
mean+=GetPixelRed(p);
sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
GetPixelRed(p)*GetPixelRed(p);
area++;
}
if ((channel & GreenChannel) != 0)
{
mean+=GetPixelGreen(p);
sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
GetPixelGreen(p);
sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
area++;
}
if ((channel & BlueChannel) != 0)
{
mean+=GetPixelBlue(p);
sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
area++;
}
if ((channel & OpacityChannel) != 0)
{
mean+=GetPixelAlpha(p);
sum_squares+=(double) GetPixelAlpha(p)*GetPixelAlpha(p);
sum_cubes+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
GetPixelAlpha(p);
sum_fourth_power+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
GetPixelAlpha(p)*GetPixelAlpha(p);
area++;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
double
index;
index=(double) GetPixelIndex(indexes+x);
mean+=index;
sum_squares+=index*index;
sum_cubes+=index*index*index;
sum_fourth_power+=index*index*index*index;
area++;
}
p++;
}
}
if (y < (ssize_t) image->rows)
return(MagickFalse);
if (area != 0.0)
{
mean/=area;
sum_squares/=area;
sum_cubes/=area;
sum_fourth_power/=area;
}
standard_deviation=sqrt(sum_squares-(mean*mean));
if (standard_deviation != 0.0)
{
*kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
3.0*mean*mean*mean*mean;
*kurtosis/=standard_deviation*standard_deviation*standard_deviation*
standard_deviation;
*kurtosis-=3.0;
*skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
*skewness/=standard_deviation*standard_deviation*standard_deviation;
}
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
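% For example, a sketch of fetching the mean and standard deviation of the
% red channel:
%
% (void) GetImageChannelMean(image,RedChannel,&mean,&standard_deviation,
% exception);
%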
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
double *standard_deviation,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
const ChannelType channel,double *mean,double *standard_deviation,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
size_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageChannelStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
channels=0;
channel_statistics[CompositeChannels].mean=0.0;
channel_statistics[CompositeChannels].standard_deviation=0.0;
if ((channel & RedChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[RedChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[RedChannel].standard_deviation;
channels++;
}
if ((channel & GreenChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[GreenChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[GreenChannel].standard_deviation;
channels++;
}
if ((channel & BlueChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[BlueChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[BlueChannel].standard_deviation;
channels++;
}
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
{
channel_statistics[CompositeChannels].mean+=
(QuantumRange-channel_statistics[OpacityChannel].mean);
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[OpacityChannel].standard_deviation;
channels++;
}
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[BlackChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[BlackChannel].standard_deviation;
channels++;
}
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].standard_deviation/=channels;
*mean=channel_statistics[CompositeChannels].mean;
*standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageChannelMoments method is:
%
% ChannelMoments *GetImageChannelMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
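% For example, a sketch of reading the first Hu invariant moment of the
% red channel (free the buffer with RelinquishMagickMemory() when done):
%
% channel_moments=GetImageChannelMoments(image,exception);
% hu_1=channel_moments[RedChannel].I[0];
%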
*/
MagickExport ChannelMoments *GetImageChannelMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
ChannelMoments
*channel_moments;
double
M00[CompositeChannels+1],
M01[CompositeChannels+1],
M02[CompositeChannels+1],
M03[CompositeChannels+1],
M10[CompositeChannels+1],
M11[CompositeChannels+1],
M12[CompositeChannels+1],
M20[CompositeChannels+1],
M21[CompositeChannels+1],
M22[CompositeChannels+1],
M30[CompositeChannels+1];
MagickPixelPacket
pixel;
PointInfo
centroid[CompositeChannels+1];
ssize_t
channel,
channels,
y;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_moments=(ChannelMoments *) AcquireQuantumMemory(length,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) memset(channel_moments,0,length*sizeof(*channel_moments));
(void) memset(centroid,0,sizeof(centroid));
(void) memset(M00,0,sizeof(M00));
(void) memset(M01,0,sizeof(M01));
(void) memset(M02,0,sizeof(M02));
(void) memset(M03,0,sizeof(M03));
(void) memset(M10,0,sizeof(M10));
(void) memset(M11,0,sizeof(M11));
(void) memset(M12,0,sizeof(M12));
(void) memset(M20,0,sizeof(M20));
(void) memset(M21,0,sizeof(M21));
(void) memset(M22,0,sizeof(M22));
(void) memset(M30,0,sizeof(M30));
GetMagickPixelPacket(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
M00[RedChannel]+=QuantumScale*pixel.red;
M10[RedChannel]+=x*QuantumScale*pixel.red;
M01[RedChannel]+=y*QuantumScale*pixel.red;
M00[GreenChannel]+=QuantumScale*pixel.green;
M10[GreenChannel]+=x*QuantumScale*pixel.green;
M01[GreenChannel]+=y*QuantumScale*pixel.green;
M00[BlueChannel]+=QuantumScale*pixel.blue;
M10[BlueChannel]+=x*QuantumScale*pixel.blue;
M01[BlueChannel]+=y*QuantumScale*pixel.blue;
if (image->matte != MagickFalse)
{
M00[OpacityChannel]+=QuantumScale*pixel.opacity;
M10[OpacityChannel]+=x*QuantumScale*pixel.opacity;
M01[OpacityChannel]+=y*QuantumScale*pixel.opacity;
}
if (image->colorspace == CMYKColorspace)
{
M00[IndexChannel]+=QuantumScale*pixel.index;
M10[IndexChannel]+=x*QuantumScale*pixel.index;
M01[IndexChannel]+=y*QuantumScale*pixel.index;
}
p++;
}
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
if (M00[channel] < MagickEpsilon)
{
M00[channel]+=MagickEpsilon;
centroid[channel].x=(double) image->columns/2.0;
centroid[channel].y=(double) image->rows/2.0;
continue;
}
M00[channel]+=MagickEpsilon;
centroid[channel].x=M10[channel]/M00[channel];
centroid[channel].y=M01[channel]/M00[channel];
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
/*
Compute the image moments.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
M11[RedChannel]+=(x-centroid[RedChannel].x)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M20[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*QuantumScale*pixel.red;
M02[RedChannel]+=(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M21[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M12[RedChannel]+=(x-centroid[RedChannel].x)*(y-
centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M22[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M30[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale*
pixel.red;
M03[RedChannel]+=(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*QuantumScale*pixel.green;
M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale*
pixel.green;
M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*QuantumScale*pixel.blue;
M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale*
pixel.blue;
M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
if (image->matte != MagickFalse)
{
M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*QuantumScale*pixel.opacity;
M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)*
QuantumScale*pixel.opacity;
M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
}
if (image->colorspace == CMYKColorspace)
{
M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*QuantumScale*pixel.index;
M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)*
QuantumScale*pixel.index;
M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
}
p++;
}
}
channels=3;
M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]);
M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]);
M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]);
M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]);
M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]);
M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]);
M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]);
M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]);
M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]);
M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]);
M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]);
if (image->matte != MagickFalse)
{
channels+=1;
M00[CompositeChannels]+=M00[OpacityChannel];
M01[CompositeChannels]+=M01[OpacityChannel];
M02[CompositeChannels]+=M02[OpacityChannel];
M03[CompositeChannels]+=M03[OpacityChannel];
M10[CompositeChannels]+=M10[OpacityChannel];
M11[CompositeChannels]+=M11[OpacityChannel];
M12[CompositeChannels]+=M12[OpacityChannel];
M20[CompositeChannels]+=M20[OpacityChannel];
M21[CompositeChannels]+=M21[OpacityChannel];
M22[CompositeChannels]+=M22[OpacityChannel];
M30[CompositeChannels]+=M30[OpacityChannel];
}
if (image->colorspace == CMYKColorspace)
{
channels+=1;
M00[CompositeChannels]+=M00[IndexChannel];
M01[CompositeChannels]+=M01[IndexChannel];
M02[CompositeChannels]+=M02[IndexChannel];
M03[CompositeChannels]+=M03[IndexChannel];
M10[CompositeChannels]+=M10[IndexChannel];
M11[CompositeChannels]+=M11[IndexChannel];
M12[CompositeChannels]+=M12[IndexChannel];
M20[CompositeChannels]+=M20[IndexChannel];
M21[CompositeChannels]+=M21[IndexChannel];
M22[CompositeChannels]+=M22[IndexChannel];
M30[CompositeChannels]+=M30[IndexChannel];
}
M00[CompositeChannels]/=(double) channels;
M01[CompositeChannels]/=(double) channels;
M02[CompositeChannels]/=(double) channels;
M03[CompositeChannels]/=(double) channels;
M10[CompositeChannels]/=(double) channels;
M11[CompositeChannels]/=(double) channels;
M12[CompositeChannels]/=(double) channels;
M20[CompositeChannels]/=(double) channels;
M21[CompositeChannels]/=(double) channels;
M22[CompositeChannels]/=(double) channels;
M30[CompositeChannels]/=(double) channels;
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
if (fabs(M11[channel]) < MagickEpsilon)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
else
if (M11[channel] < 0.0)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=180.0;
}
else
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y/
(channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
channel_moments[channel].ellipse_intensity=M00[channel]/
(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Normalize image moments: eta(p,q)=mu(p,q)/mu(0,0)^(1+(p+q)/2).
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
M00[channel]=1.0;
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].I[0]=M20[channel]+M02[channel];
channel_moments[channel].I[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].I[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].I[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3.0*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelPerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImageChannelPerceptualHash method is:
%
% ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
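% For example, a sketch of reading the first sRGB-derived (P) and
% HCLp-derived (Q) hash components of the red channel:
%
% phash=GetImageChannelPerceptualHash(image,exception);
% p_0=phash[RedChannel].P[0];
% q_0=phash[RedChannel].Q[0];
%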
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
const Image *image,ExceptionInfo *exception)
{
ChannelMoments
*moments;
ChannelPerceptualHash
*perceptual_hash;
Image
*hash_image;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
channel;
/*
Blur then transform to sRGB colorspace.
*/
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
return((ChannelPerceptualHash *) NULL);
hash_image->depth=8;
status=TransformImageColorspace(hash_image,sRGBColorspace);
if (status == MagickFalse)
{
hash_image=DestroyImage(hash_image);
return((ChannelPerceptualHash *) NULL);
}
moments=GetImageChannelMoments(hash_image,exception);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
return((ChannelPerceptualHash *) NULL);
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
CompositeChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
for (channel=0; channel <= CompositeChannels; channel++)
for (i=0; i < MaximumNumberOfImageMoments; i++)
perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
/*
Blur then transform to HCLp colorspace.
*/
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
hash_image->depth=8;
status=TransformImageColorspace(hash_image,HCLpColorspace);
if (status == MagickFalse)
{
hash_image=DestroyImage(hash_image);
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
moments=GetImageChannelMoments(hash_image,exception);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
for (channel=0; channel <= CompositeChannels; channel++)
for (i=0; i < MaximumNumberOfImageMoments; i++)
perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
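% For example, a sketch of finding the dynamic range of the red channel:
%
% (void) GetImageChannelRange(image,RedChannel,&minima,&maxima,exception);
%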
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
double *minima,double *maxima,ExceptionInfo *exception)
{
return(GetImageChannelRange(image,CompositeChannels,minima,maxima,exception));
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
const ChannelType channel,double *minima,double *maxima,
ExceptionInfo *exception)
{
MagickPixelPacket
pixel;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*maxima=(-MagickMaximumValue);
*minima=MagickMaximumValue;
GetMagickPixelPacket(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((channel & RedChannel) != 0)
{
if (pixel.red < *minima)
*minima=(double) pixel.red;
if (pixel.red > *maxima)
*maxima=(double) pixel.red;
}
if ((channel & GreenChannel) != 0)
{
if (pixel.green < *minima)
*minima=(double) pixel.green;
if (pixel.green > *maxima)
*maxima=(double) pixel.green;
}
if ((channel & BlueChannel) != 0)
{
if (pixel.blue < *minima)
*minima=(double) pixel.blue;
if (pixel.blue > *maxima)
*maxima=(double) pixel.blue;
}
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
{
if ((QuantumRange-pixel.opacity) < *minima)
*minima=(double) (QuantumRange-pixel.opacity);
if ((QuantumRange-pixel.opacity) > *maxima)
*maxima=(double) (QuantumRange-pixel.opacity);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if ((double) pixel.index < *minima)
*minima=(double) pixel.index;
if ((double) pixel.index > *maxima)
*maxima=(double) pixel.index;
}
p++;
}
}
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
standard_deviation;
MagickPixelPacket
number_bins,
*histogram;
QuantumAny
range;
register ssize_t
i;
size_t
channels,
depth,
length;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
sizeof(*channel_statistics));
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1U,
sizeof(*histogram));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (MagickPixelPacket *) NULL))
{
if (histogram != (MagickPixelPacket *) NULL)
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) memset(channel_statistics,0,length*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) memset(histogram,0,(MaxMap+1U)*sizeof(*histogram));
(void) memset(&number_bins,0,sizeof(number_bins));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; )
{
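/*
x is advanced at the bottom of the loop body; the continue statements
below re-test the same pixel at an increased channel depth.
*/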
if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[RedChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
{
channel_statistics[RedChannel].depth++;
continue;
}
}
if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[GreenChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
{
channel_statistics[GreenChannel].depth++;
continue;
}
}
if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlueChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
{
channel_statistics[BlueChannel].depth++;
continue;
}
}
if (image->matte != MagickFalse)
{
if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[OpacityChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelAlpha(p),range) == MagickFalse)
{
channel_statistics[OpacityChannel].depth++;
continue;
}
}
}
if (image->colorspace == CMYKColorspace)
{
if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlackChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
{
channel_statistics[BlackChannel].depth++;
continue;
}
}
}
if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
channel_statistics[RedChannel].sum+=GetPixelRed(p);
channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
GetPixelRed(p);
channel_statistics[RedChannel].sum_cubed+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
channel_statistics[RedChannel].sum_fourth_power+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
GetPixelGreen(p);
channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
channel_statistics[GreenChannel].sum_fourth_power+=(double)
GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
GetPixelBlue(p);
channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
channel_statistics[BlueChannel].sum_fourth_power+=(double)
GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
if (image->matte != MagickFalse)
{
if ((double) GetPixelAlpha(p) < channel_statistics[OpacityChannel].minima)
channel_statistics[OpacityChannel].minima=(double) GetPixelAlpha(p);
if ((double) GetPixelAlpha(p) > channel_statistics[OpacityChannel].maxima)
channel_statistics[OpacityChannel].maxima=(double) GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum+=GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_squared+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_cubed+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_fourth_power+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
histogram[ScaleQuantumToMap(GetPixelAlpha(p))].opacity++;
}
if (image->colorspace == CMYKColorspace)
{
if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
channel_statistics[BlackChannel].minima=(double)
GetPixelIndex(indexes+x);
if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
channel_statistics[BlackChannel].maxima=(double)
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_squared+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_cubed+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_fourth_power+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
}
x++;
p++;
}
}
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
double
area,
mean,
standard_deviation;
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal((double) image->columns*image->rows);
mean=channel_statistics[i].sum*area;
channel_statistics[i].sum=mean;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=mean;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-(mean*mean));
area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
((double) image->columns*image->rows);
standard_deviation=sqrt(area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) (MaxMap+1U); i++)
{
if (histogram[i].red > 0.0)
number_bins.red++;
if (histogram[i].green > 0.0)
number_bins.green++;
if (histogram[i].blue > 0.0)
number_bins.blue++;
if ((image->matte != MagickFalse) && (histogram[i].opacity > 0.0))
number_bins.opacity++;
if ((image->colorspace == CMYKColorspace) && (histogram[i].index > 0.0))
number_bins.index++;
}
area=PerceptibleReciprocal((double) image->columns*image->rows);
for (i=0; i < (ssize_t) (MaxMap+1U); i++)
{
/*
Compute pixel entropy.
*/
histogram[i].red*=area;
channel_statistics[RedChannel].entropy+=-histogram[i].red*
MagickLog10(histogram[i].red)*
PerceptibleReciprocal(MagickLog10((double) number_bins.red));
histogram[i].green*=area;
channel_statistics[GreenChannel].entropy+=-histogram[i].green*
MagickLog10(histogram[i].green)*
PerceptibleReciprocal(MagickLog10((double) number_bins.green));
histogram[i].blue*=area;
channel_statistics[BlueChannel].entropy+=-histogram[i].blue*
MagickLog10(histogram[i].blue)*
PerceptibleReciprocal(MagickLog10((double) number_bins.blue));
if (image->matte != MagickFalse)
{
histogram[i].opacity*=area;
channel_statistics[OpacityChannel].entropy+=-histogram[i].opacity*
MagickLog10(histogram[i].opacity)*
PerceptibleReciprocal(MagickLog10((double) number_bins.opacity));
}
if (image->colorspace == CMYKColorspace)
{
histogram[i].index*=area;
channel_statistics[IndexChannel].entropy+=-histogram[i].index*
MagickLog10(histogram[i].index)*
PerceptibleReciprocal(MagickLog10((double) number_bins.index));
}
}
/*
Compute overall statistics.
*/
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
channel_statistics[CompositeChannels].depth,(double)
channel_statistics[i].depth);
channel_statistics[CompositeChannels].minima=MagickMin(
channel_statistics[CompositeChannels].minima,
channel_statistics[i].minima);
channel_statistics[CompositeChannels].maxima=EvaluateMax(
channel_statistics[CompositeChannels].maxima,
channel_statistics[i].maxima);
channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
channel_statistics[CompositeChannels].sum_squared+=
channel_statistics[i].sum_squared;
channel_statistics[CompositeChannels].sum_cubed+=
channel_statistics[i].sum_cubed;
channel_statistics[CompositeChannels].sum_fourth_power+=
channel_statistics[i].sum_fourth_power;
channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
channel_statistics[CompositeChannels].variance+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
((double) image->columns*image->rows);
standard_deviation=sqrt(area*standard_deviation*standard_deviation);
channel_statistics[CompositeChannels].standard_deviation=standard_deviation;
channel_statistics[CompositeChannels].entropy+=
channel_statistics[i].entropy;
}
channels=3;
if (image->matte != MagickFalse)
channels++;
if (image->colorspace == CMYKColorspace)
channels++;
channel_statistics[CompositeChannels].sum/=channels;
channel_statistics[CompositeChannels].sum_squared/=channels;
channel_statistics[CompositeChannels].sum_cubed/=channels;
channel_statistics[CompositeChannels].sum_fourth_power/=channels;
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].kurtosis/=channels;
channel_statistics[CompositeChannels].skewness/=channels;
channel_statistics[CompositeChannels].entropy/=channels;
i=CompositeChannels;
area=PerceptibleReciprocal((double) channels*image->columns*image->rows);
channel_statistics[i].variance=channel_statistics[i].sum_squared;
channel_statistics[i].mean=channel_statistics[i].sum;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal((double) channels*
image->columns*image->rows-1.0)*channels*image->columns*image->rows*
standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
channel_statistics[CompositeChannels].mean=0.0;
channel_statistics[CompositeChannels].standard_deviation=0.0;
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[i].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[i].standard_deviation;
}
channel_statistics[CompositeChannels].mean/=(double) channels;
channel_statistics[CompositeChannels].standard_deviation/=(double) channels;
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
% Image *PolynomialImageChannel(const Image *images,
% const size_t number_terms,const ChannelType channel,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o channel: the channel.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
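% For example, a sketch that averages two images in a sequence with the
% polynomial 0.5*u+0.5*v, i.e. two (coefficient,degree) pairs:
%
% const double terms[] = { 0.5, 1.0, 0.5, 1.0 };
% average_image=PolynomialImage(images,2,terms,exception);
%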
*/
MagickExport Image *PolynomialImage(const Image *images,
const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
Image
*polynomial_image;
polynomial_image=PolynomialImageChannel(images,DefaultChannels,number_terms,
terms,exception);
return(polynomial_image);
}
MagickExport Image *PolynomialImageChannel(const Image *images,
const ChannelType channel,const size_t number_terms,const double *terms,
ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**magick_restrict polynomial_pixels,
zero;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
polynomial_pixels=AcquirePixelThreadSet(images);
if (polynomial_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Polynomial image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict polynomial_indexes;
register MagickPixelPacket
*polynomial_pixel;
register PixelPacket
*magick_restrict q;
register ssize_t
i,
x;
size_t
number_images;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
polynomial_pixel=polynomial_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
polynomial_pixel[x]=zero;
next=images;
number_images=GetImageListLength(images);
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
if (i >= (ssize_t) number_terms)
break;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
coefficient,
degree;
coefficient=terms[i << 1];
degree=terms[(i << 1)+1];
if ((channel & RedChannel) != 0)
polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
if ((channel & GreenChannel) != 0)
polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
degree);
if ((channel & BlueChannel) != 0)
polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,
degree);
if ((channel & OpacityChannel) != 0)
polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
(QuantumRange-p->opacity),degree);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
degree);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
else
SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
polynomial_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(images,PolynomialImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
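% For example, a sketch of applying a 3x3 median filter:
%
% statistic_image=StatisticImage(image,MedianStatistic,3,3,exception);
%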
*/
#define ListChannels 5
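/*
Each channel histogram is kept as a skip-list over the 65536 possible
16-bit sample values; node 65536 serves as the sentinel root. This gives
O(log n) expected insertion while preserving an ordered traversal for the
rank statistics below.
*/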
typedef struct _ListNode
{
size_t
next[9],
count,
signature;
} ListNode;
typedef struct _SkipList
{
ssize_t
level;
ListNode
*nodes;
} SkipList;
typedef struct _PixelList
{
size_t
length,
seed,
signature;
SkipList
lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
register ssize_t
i;
if (pixel_list == (PixelList *) NULL)
return((PixelList *) NULL);
for (i=0; i < ListChannels; i++)
if (pixel_list->lists[i].nodes != (ListNode *) NULL)
pixel_list->lists[i].nodes=(ListNode *) RelinquishAlignedMemory(
pixel_list->lists[i].nodes);
pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
register ssize_t
i;
assert(pixel_list != (PixelList **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixel_list[i] != (PixelList *) NULL)
pixel_list[i]=DestroyPixelList(pixel_list[i]);
pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
register ssize_t
i;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
for (i=0; i < ListChannels; i++)
{
pixel_list->lists[i].nodes=(ListNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->lists[i].nodes));
if (pixel_list->lists[i].nodes == (ListNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->lists[i].nodes,0,65537UL*
sizeof(*pixel_list->lists[i].nodes));
}
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
const size_t color)
{
register SkipList
*list;
register ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
list=pixel_list->lists+channel;
list->nodes[color].signature=pixel_list->signature;
list->nodes[color].count=1;
/*
Determine where it belongs in the list.
*/
search=65536UL;
for (level=list->level; level >= 0; level--)
{
while (list->nodes[search].next[level] < color)
search=list->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node.
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
if (level > 8)
level=8;
if (level > (list->level+2))
level=list->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > list->level)
{
list->level++;
update[list->level]=65536UL;
}
/*
Link the node into the skip-list.
*/
do
{
list->nodes[color].next[level]=list->nodes[update[level]].next[level];
list->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
maximum;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the maximum value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
maximum=list->nodes[color].next[0];
do
{
color=list->nodes[color].next[0];
if (color > maximum)
maximum=color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) maximum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
MagickRealType
sum;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the mean value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
do
{
color=list->nodes[color].next[0];
sum+=(MagickRealType) list->nodes[color].count*color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
channels[channel]=(unsigned short) sum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the median value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
do
{
color=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
channels[channel]=(unsigned short) color;
}
GetMagickPixelPacket((const Image *) NULL,pixel);
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
minimum;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the minimum value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
count=0;
color=65536UL;
minimum=list->nodes[color].next[0];
do
{
color=list->nodes[color].next[0];
if (color < minimum)
minimum=color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) minimum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
max_count,
mode;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the mode (the 'predominant color' of the neighborhood) for each
color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
mode=color;
max_count=list->nodes[mode].count;
count=0;
do
{
color=list->nodes[color].next[0];
if (list->nodes[color].count > max_count)
{
mode=color;
max_count=list->nodes[mode].count;
}
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) mode;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
next,
previous;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the non-peak value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
next=list->nodes[color].next[0];
count=0;
do
{
previous=color;
color=next;
next=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
channels[channel]=(unsigned short) color;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetRootMeanSquarePixelList(PixelList *pixel_list,
MagickPixelPacket *pixel)
{
MagickRealType
sum;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the root mean square value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
do
{
color=list->nodes[color].next[0];
sum+=(MagickRealType) list->nodes[color].count*color*color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
channels[channel]=(unsigned short) sqrt(sum);
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetStandardDeviationPixelList(PixelList *pixel_list,
MagickPixelPacket *pixel)
{
MagickRealType
sum,
sum_squared;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the standard-deviation value for each of the colors.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
sum_squared=0.0;
do
{
register ssize_t
i;
color=list->nodes[color].next[0];
sum+=(MagickRealType) list->nodes[color].count*color;
for (i=0; i < (ssize_t) list->nodes[color].count; i++)
sum_squared+=((MagickRealType) color)*((MagickRealType) color);
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
sum_squared/=pixel_list->length;
channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
const IndexPacket *indexes,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(GetPixelRed(pixel));
signature=pixel_list->lists[0].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[0].nodes[index].count++;
else
AddNodePixelList(pixel_list,0,index);
index=ScaleQuantumToShort(GetPixelGreen(pixel));
signature=pixel_list->lists[1].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[1].nodes[index].count++;
else
AddNodePixelList(pixel_list,1,index);
index=ScaleQuantumToShort(GetPixelBlue(pixel));
signature=pixel_list->lists[2].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[2].nodes[index].count++;
else
AddNodePixelList(pixel_list,2,index);
index=ScaleQuantumToShort(GetPixelOpacity(pixel));
signature=pixel_list->lists[3].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[3].nodes[index].count++;
else
AddNodePixelList(pixel_list,3,index);
if (image->colorspace == CMYKColorspace)
index=ScaleQuantumToShort(GetPixelIndex(indexes));
signature=pixel_list->lists[4].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[4].nodes[index].count++;
else
AddNodePixelList(pixel_list,4,index);
}
static void ResetPixelList(PixelList *pixel_list)
{
int
level;
register ListNode
*root;
register SkipList
*list;
register ssize_t
channel;
/*
Reset the skip-list.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
root=list->nodes+65536UL;
list->level=0;
for (level=0; level < 9; level++)
root->next[level]=65536UL;
}
pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
Image
*statistic_image;
statistic_image=StatisticImageChannel(image,DefaultChannels,type,width,
height,exception);
return(statistic_image);
}
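/*
Usage sketch (added for illustration): replace each pixel with the median
of its 3x3 neighborhood:

Image *median = StatisticImage(image, MedianStatistic, 3, 3, exception);
*/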
MagickExport Image *StatisticImageChannel(const Image *image,
const ChannelType channel,const StatisticType type,const size_t width,
const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
size_t
neighbor_height,
neighbor_width;
ssize_t
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,0,0,MagickTrue,exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
{
InheritException(exception,&statistic_image->exception);
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) :
width;
neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
height;
pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict statistic_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),
y-(ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
neighbor_height,exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,
1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
MagickPixelPacket
pixel;
register const IndexPacket
*magick_restrict s;
register const PixelPacket
*magick_restrict r;
register ssize_t
u,
v;
r=p;
s=indexes+x;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) neighbor_height; v++)
{
for (u=0; u < (ssize_t) neighbor_width; u++)
InsertPixelList(image,r+u,s+u,pixel_list[id]);
r+=image->columns+neighbor_width;
s+=image->columns+neighbor_width;
}
GetMagickPixelPacket(image,&pixel);
SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+
neighbor_width*neighbor_height/2,&pixel);
switch (type)
{
case GradientStatistic:
{
MagickPixelPacket
maximum,
minimum;
GetMinimumPixelList(pixel_list[id],&pixel);
minimum=pixel;
GetMaximumPixelList(pixel_list[id],&pixel);
maximum=pixel;
pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
break;
}
case MaximumStatistic:
{
GetMaximumPixelList(pixel_list[id],&pixel);
break;
}
case MeanStatistic:
{
GetMeanPixelList(pixel_list[id],&pixel);
break;
}
case MedianStatistic:
default:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
GetMinimumPixelList(pixel_list[id],&pixel);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
GetRootMeanSquarePixelList(pixel_list[id],&pixel);
break;
}
case StandardDeviationStatistic:
{
GetStandardDeviationPixelList(pixel_list[id],&pixel);
break;
}
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(pixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,StatisticImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
|
Matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matrix - Matrix stored and accessible by rows. Indices and values for
* the matrix nonzeros are copied into the matrix a row at a time, in any
* order using the MatrixSetRow function. The MatrixGetRow function returns
* a pointer to the indices and values of a row. The matrix has a set of
* row and column indices such that these indices begin at "beg" and end
* at "end", where 0 <= "beg" <= "end". In other words, the matrix indices
* have any nonnegative base value, and the base values of the row and column
* indices must agree.
*
*****************************************************************************/
#include <stdlib.h>
//#include <memory.h>
#include "Common.h"
#include "Matrix.h"
#include "Numbering.h"
#define MAX_NZ_PER_ROW 1000
/*--------------------------------------------------------------------------
* MatrixCreate - Return (a pointer to) a matrix object.
*--------------------------------------------------------------------------*/
Matrix *MatrixCreate(MPI_Comm comm, HYPRE_Int beg_row, HYPRE_Int end_row)
{
HYPRE_Int num_rows, mype, npes;
Matrix *mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);
mat->comm = comm;
mat->beg_row = beg_row;
mat->end_row = end_row;
mat->mem = (Mem *) MemCreate();
num_rows = mat->end_row - mat->beg_row + 1;
mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int));
mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *));
mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *));
/* Send beg_row and end_row to all processors */
/* This is needed in order to map row numbers to processors */
hypre_MPI_Comm_rank(comm, &mype);
hypre_MPI_Comm_size(comm, &npes);
mat->beg_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int));
mat->end_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int));
hypre_MPI_Allgather(&beg_row, 1, HYPRE_MPI_INT, mat->beg_rows, 1, HYPRE_MPI_INT, comm);
hypre_MPI_Allgather(&end_row, 1, HYPRE_MPI_INT, mat->end_rows, 1, HYPRE_MPI_INT, comm);
mat->num_recv = 0;
mat->num_send = 0;
mat->recv_req = NULL;
mat->send_req = NULL;
mat->recv_req2 = NULL;
mat->send_req2 = NULL;
mat->statuses = NULL;
mat->sendind = NULL;
mat->sendbuf = NULL;
mat->recvbuf = NULL;
mat->numb = NULL;
return mat;
}
/*--------------------------------------------------------------------------
* MatrixCreateLocal - Return (a pointer to) a matrix object.
* The matrix created by this call is a local matrix, not a global matrix.
*--------------------------------------------------------------------------*/
Matrix *MatrixCreateLocal(HYPRE_Int beg_row, HYPRE_Int end_row)
{
HYPRE_Int num_rows;
Matrix *mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);
mat->comm = hypre_MPI_COMM_NULL;
mat->beg_row = beg_row;
mat->end_row = end_row;
mat->mem = (Mem *) MemCreate();
num_rows = mat->end_row - mat->beg_row + 1;
mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int));
mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *));
mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *));
/* Send beg_row and end_row to all processors */
/* This is needed in order to map row numbers to processors */
mat->beg_rows = NULL;
mat->end_rows = NULL;
mat->num_recv = 0;
mat->num_send = 0;
mat->recv_req = NULL;
mat->send_req = NULL;
mat->recv_req2 = NULL;
mat->send_req2 = NULL;
mat->statuses = NULL;
mat->sendind = NULL;
mat->sendbuf = NULL;
mat->recvbuf = NULL;
mat->numb = NULL;
return mat;
}
/*--------------------------------------------------------------------------
* MatrixDestroy - Destroy a matrix object "mat".
*--------------------------------------------------------------------------*/
void MatrixDestroy(Matrix *mat)
{
HYPRE_Int i;
for (i=0; i<mat->num_recv; i++)
hypre_MPI_Request_free(&mat->recv_req[i]);
for (i=0; i<mat->num_send; i++)
hypre_MPI_Request_free(&mat->send_req[i]);
for (i=0; i<mat->num_send; i++)
hypre_MPI_Request_free(&mat->recv_req2[i]);
for (i=0; i<mat->num_recv; i++)
hypre_MPI_Request_free(&mat->send_req2[i]);
hypre_TFree(mat->recv_req,HYPRE_MEMORY_HOST);
hypre_TFree(mat->send_req,HYPRE_MEMORY_HOST);
hypre_TFree(mat->recv_req2,HYPRE_MEMORY_HOST);
hypre_TFree(mat->send_req2,HYPRE_MEMORY_HOST);
hypre_TFree(mat->statuses,HYPRE_MEMORY_HOST);
hypre_TFree(mat->sendind,HYPRE_MEMORY_HOST);
hypre_TFree(mat->sendbuf,HYPRE_MEMORY_HOST);
hypre_TFree(mat->recvbuf,HYPRE_MEMORY_HOST);
MemDestroy(mat->mem);
if (mat->numb)
NumberingDestroy(mat->numb);
hypre_TFree(mat,HYPRE_MEMORY_HOST);
}
/*--------------------------------------------------------------------------
* MatrixSetRow - Set a row in a matrix. Only local rows can be set.
* Once a row has been set, it should not be set again, or else the
* memory used by the existing row will not be recovered until
* the matrix is destroyed. "row" is in global coordinate numbering.
*--------------------------------------------------------------------------*/
void MatrixSetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int len, HYPRE_Int *ind, HYPRE_Real *val)
{
row -= mat->beg_row;
mat->lens[row] = len;
mat->inds[row] = (HYPRE_Int *) MemAlloc(mat->mem, len*sizeof(HYPRE_Int));
mat->vals[row] = (HYPRE_Real *) MemAlloc(mat->mem, len*sizeof(HYPRE_Real));
if (ind != NULL)
{
//hypre_TMemcpy(mat->inds[row], ind, HYPRE_Int, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
memcpy(mat->inds[row], ind, sizeof(HYPRE_Int) * len);
}
if (val != NULL)
{
//hypre_TMemcpy(mat->vals[row], val, HYPRE_Real, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
memcpy(mat->vals[row], val, sizeof(HYPRE_Real) * len);
}
}
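/*
Usage sketch (hypothetical values, not part of the original source):
assemble one local row, with column indices in global numbering:

HYPRE_Int  ind[3] = {10, 11, 12};
HYPRE_Real val[3] = {-1.0, 2.0, -1.0};
MatrixSetRow(mat, row, 3, ind, val);   where beg_row <= row <= end_row

As noted above, setting the same row twice leaks the first copy until
the matrix is destroyed.
*/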
/*--------------------------------------------------------------------------
* MatrixGetRow - Get a *local* row in a matrix.
*--------------------------------------------------------------------------*/
void MatrixGetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int *lenp, HYPRE_Int **indp, HYPRE_Real **valp)
{
*lenp = mat->lens[row];
*indp = mat->inds[row];
*valp = mat->vals[row];
}
/*--------------------------------------------------------------------------
* MatrixRowPe - Map "row" to a processor number.
*--------------------------------------------------------------------------*/
HYPRE_Int MatrixRowPe(Matrix *mat, HYPRE_Int row)
{
HYPRE_Int npes, pe;
HYPRE_Int *beg = mat->beg_rows;
HYPRE_Int *end = mat->end_rows;
hypre_MPI_Comm_size(mat->comm, &npes);
for (pe=0; pe<npes; pe++)
{
if (row >= beg[pe] && row <= end[pe])
return pe;
}
hypre_printf("MatrixRowPe: could not map row %d.\n", row);
PARASAILS_EXIT;
return -1; /* for picky compilers */
}
/*--------------------------------------------------------------------------
* MatrixNnz - Return total number of nonzeros in preconditioner.
*--------------------------------------------------------------------------*/
HYPRE_Int MatrixNnz(Matrix *mat)
{
HYPRE_Int num_local, i, total, alltotal;
num_local = mat->end_row - mat->beg_row + 1;
total = 0;
for (i=0; i<num_local; i++)
total += mat->lens[i];
hypre_MPI_Allreduce(&total, &alltotal, 1, HYPRE_MPI_INT, hypre_MPI_SUM, mat->comm);
return alltotal;
}
/*--------------------------------------------------------------------------
* MatrixPrint - Print a matrix to a file "filename". Each processor
* appends to the file in order, but the file is overwritten if it exists.
*--------------------------------------------------------------------------*/
void MatrixPrint(Matrix *mat, char *filename)
{
HYPRE_Int mype, npes, pe;
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val;
hypre_MPI_Comm_rank(mat->comm, &mype);
hypre_MPI_Comm_size(mat->comm, &npes);
for (pe=0; pe<npes; pe++)
{
hypre_MPI_Barrier(mat->comm);
if (mype == pe)
{
FILE *file = fopen(filename, (pe==0 ? "w" : "a"));
hypre_assert(file != NULL);
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
for (i=0; i<len; i++)
hypre_fprintf(file, "%d %d %.14e\n",
row + mat->beg_row,
mat->numb->local_to_global[ind[i]], val[i]);
}
fclose(file);
}
}
}
/*--------------------------------------------------------------------------
* MatrixReadMaster - MatrixRead routine for processor 0. Internal use.
*--------------------------------------------------------------------------*/
static void MatrixReadMaster(Matrix *mat, char *filename)
{
MPI_Comm comm = mat->comm;
HYPRE_Int mype, npes;
FILE *file;
HYPRE_Int ret;
HYPRE_Int num_rows, curr_proc;
HYPRE_Int row, col;
HYPRE_Real value;
hypre_longint offset;
hypre_longint outbuf;
HYPRE_Int curr_row;
HYPRE_Int len;
HYPRE_Int ind[MAX_NZ_PER_ROW];
HYPRE_Real val[MAX_NZ_PER_ROW];
char line[100];
HYPRE_Int oldrow;
hypre_MPI_Request request;
hypre_MPI_Status status;
hypre_MPI_Comm_size(mat->comm, &npes);
hypre_MPI_Comm_rank(mat->comm, &mype);
file = fopen(filename, "r");
hypre_assert(file != NULL);
if (fgets(line, 100, file) == NULL)
{
hypre_fprintf(stderr, "Error reading file.\n");
PARASAILS_EXIT;
}
#ifdef EMSOLVE
ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
for (row=0; row<num_rows; row++)
hypre_fscanf(file, "%*d");
#else
ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif
offset = ftell(file);
hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
request = hypre_MPI_REQUEST_NULL;
curr_proc = 1; /* proc for which we are looking for the beginning */
while (curr_proc < npes)
{
if (row == mat->beg_rows[curr_proc])
{
hypre_MPI_Wait(&request, &status);
outbuf = offset;
hypre_MPI_Isend(&outbuf, 1, hypre_MPI_LONG, curr_proc, 0, comm, &request);
curr_proc++;
}
offset = ftell(file);
oldrow = row;
hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
if (oldrow > row)
{
hypre_fprintf(stderr, "Matrix file is not sorted by rows.\n");
PARASAILS_EXIT;
}
}
/* Now read our own part */
rewind(file);
if (fgets(line, 100, file) == NULL)
{
hypre_fprintf(stderr, "Error reading file.\n");
PARASAILS_EXIT;
}
#ifdef EMSOLVE
ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
for (row=0; row<num_rows; row++)
hypre_fscanf(file, "%*d");
#else
ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
curr_row = row;
len = 0;
while (ret != EOF && row <= mat->end_row)
{
if (row != curr_row)
{
/* store this row */
MatrixSetRow(mat, curr_row, len, ind, val);
curr_row = row;
/* reset row pointer */
len = 0;
}
if (len >= MAX_NZ_PER_ROW)
{
hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
hypre_fprintf(stderr, "increased to continue.\n");
PARASAILS_EXIT;
}
ind[len] = col;
val[len] = value;
len++;
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
}
/* Store the final row */
if (ret == EOF || row > mat->end_row)
MatrixSetRow(mat, mat->end_row, len, ind, val);
fclose(file);
hypre_MPI_Wait(&request, &status);
}
/*--------------------------------------------------------------------------
* MatrixReadSlave - MatrixRead routine for other processors. Internal use.
*--------------------------------------------------------------------------*/
static void MatrixReadSlave(Matrix *mat, char *filename)
{
MPI_Comm comm = mat->comm;
hypre_MPI_Status status;
HYPRE_Int mype;
FILE *file;
HYPRE_Int ret;
HYPRE_Int row, col;
HYPRE_Real value;
hypre_longint offset;
HYPRE_Int curr_row;
HYPRE_Int len;
HYPRE_Int ind[MAX_NZ_PER_ROW];
HYPRE_Real val[MAX_NZ_PER_ROW];
HYPRE_Real time0, time1;
file = fopen(filename, "r");
hypre_assert(file != NULL);
hypre_MPI_Comm_rank(mat->comm, &mype);
hypre_MPI_Recv(&offset, 1, hypre_MPI_LONG, 0, 0, comm, &status);
time0 = hypre_MPI_Wtime();
ret = fseek(file, offset, SEEK_SET);
hypre_assert(ret == 0);
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
curr_row = row;
len = 0;
while (ret != EOF && row <= mat->end_row)
{
if (row != curr_row)
{
/* store this row */
MatrixSetRow(mat, curr_row, len, ind, val);
curr_row = row;
/* reset row pointer */
len = 0;
}
if (len >= MAX_NZ_PER_ROW)
{
hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
hypre_fprintf(stderr, "increased to continue.\n");
PARASAILS_EXIT;
}
ind[len] = col;
val[len] = value;
len++;
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
}
/* Store the final row */
if (ret == EOF || row > mat->end_row)
MatrixSetRow(mat, mat->end_row, len, ind, val);
fclose(file);
time1 = hypre_MPI_Wtime();
hypre_printf("%d: Time for slave read: %f\n", mype, time1-time0);
}
/*--------------------------------------------------------------------------
* MatrixRead - Read a matrix file "filename" from disk and store in the
* matrix "mat" which has already been created using MatrixCreate. The format
* assumes no empty (all-zero) rows, that the rows appear in order, and that
* there is at least one row per processor (see the example layout below).
*--------------------------------------------------------------------------*/
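/*
Example file layout (added for illustration; 1-based numbering is assumed
here, though any nonnegative base is allowed). The header's first integer
is the row count and the next two fields are ignored; the triples must be
sorted by row:

4 4 8
1 1  4.0
1 2 -1.0
2 2  4.0
...
*/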
void MatrixRead(Matrix *mat, char *filename)
{
HYPRE_Int mype;
HYPRE_Real time0, time1;
hypre_MPI_Comm_rank(mat->comm, &mype);
time0 = hypre_MPI_Wtime();
if (mype == 0)
MatrixReadMaster(mat, filename);
else
MatrixReadSlave(mat, filename);
time1 = hypre_MPI_Wtime();
hypre_printf("%d: Time for reading matrix: %f\n", mype, time1-time0);
MatrixComplete(mat);
}
/*--------------------------------------------------------------------------
* RhsRead - Read a right-hand side file "filename" from disk and store in the
* location pointed to by "rhs". "mat" is needed to provide the partitioning
* information. The expected format is: a header line (n, nrhs) followed
* by n values. The isis format is also accepted; it is detected when the
* header line converts as a single integer, and each value is then
* preceded by its index (see the example layouts below).
*--------------------------------------------------------------------------*/
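/*
Example file layouts (added for illustration):

standard:          isis variant:
4 1                4
0.5                1 0.5
1.0                2 1.0
...                ...

The isis variant's per-line index is read but discarded ("%*d %lf").
*/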
void RhsRead(HYPRE_Real *rhs, Matrix *mat, char *filename)
{
FILE *file;
hypre_MPI_Status status;
HYPRE_Int mype, npes;
HYPRE_Int num_rows, num_local, pe, i, converted;
HYPRE_Real *buffer = NULL;
HYPRE_Int buflen = 0;
char line[100];
HYPRE_Int dummy;
hypre_MPI_Comm_size(mat->comm, &npes);
hypre_MPI_Comm_rank(mat->comm, &mype);
num_local = mat->end_row - mat->beg_row + 1;
if (mype != 0)
{
hypre_MPI_Recv(rhs, num_local, hypre_MPI_REAL, 0, 0, mat->comm, &status);
return;
}
file = fopen(filename, "r");
hypre_assert(file != NULL);
if (fgets(line, 100, file) == NULL)
{
hypre_fprintf(stderr, "Error reading file.\n");
PARASAILS_EXIT;
}
converted = hypre_sscanf(line, "%d %d", &num_rows, &dummy);
hypre_assert(num_rows == mat->end_rows[npes-1]);
/* Read own rows first */
for (i=0; i<num_local; i++)
if (converted == 1) /* isis format */
hypre_fscanf(file, "%*d %lf", &rhs[i]);
else
hypre_fscanf(file, "%lf", &rhs[i]);
for (pe=1; pe<npes; pe++)
{
num_local = mat->end_rows[pe] - mat->beg_rows[pe]+ 1;
if (buflen < num_local)
{
hypre_TFree(buffer,HYPRE_MEMORY_HOST);
buflen = num_local;
buffer = hypre_TAlloc(HYPRE_Real, buflen , HYPRE_MEMORY_HOST);
}
for (i=0; i<num_local; i++)
if (converted == 1) /* isis format */
hypre_fscanf(file, "%*d %lf", &buffer[i]);
else
hypre_fscanf(file, "%lf", &buffer[i]);
hypre_MPI_Send(buffer, num_local, hypre_MPI_REAL, pe, 0, mat->comm);
}
hypre_TFree(buffer,HYPRE_MEMORY_HOST);
}
/*--------------------------------------------------------------------------
* SetupReceives
*--------------------------------------------------------------------------*/
static void SetupReceives(Matrix *mat, HYPRE_Int reqlen, HYPRE_Int *reqind, HYPRE_Int *outlist)
{
HYPRE_Int i, j, this_pe, mype;
hypre_MPI_Request request;
MPI_Comm comm = mat->comm;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
hypre_MPI_Comm_rank(comm, &mype);
mat->num_recv = 0;
/* Allocate recvbuf */
/* recvbuf has num_local entries reserved for the local part of x, used in matvec */
mat->recvlen = reqlen; /* used for the transpose multiply */
mat->recvbuf = hypre_TAlloc(HYPRE_Real, (reqlen+num_local) , HYPRE_MEMORY_HOST);
for (i=0; i<reqlen; i=j) /* j is set below */
{
/* The processor that owns the row with index reqind[i] */
this_pe = MatrixRowPe(mat, reqind[i]);
/* Figure out other rows we need from this_pe */
for (j=i+1; j<reqlen; j++)
{
/* if row is on different pe */
if (reqind[j] < mat->beg_rows[this_pe] ||
reqind[j] > mat->end_rows[this_pe])
break;
}
/* Request rows in reqind[i..j-1] */
hypre_MPI_Isend(&reqind[i], j-i, HYPRE_MPI_INT, this_pe, 444, comm, &request);
hypre_MPI_Request_free(&request);
/* Count of the number of indices needed from this_pe */
outlist[this_pe] = j-i;
hypre_MPI_Recv_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_REAL, this_pe, 555,
comm, &mat->recv_req[mat->num_recv]);
hypre_MPI_Send_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_REAL, this_pe, 666,
comm, &mat->send_req2[mat->num_recv]);
mat->num_recv++;
}
}
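/*
Illustration (added): reqind lists the needed external row indices, which
this loop assumes are grouped by owning processor, e.g. with two owners

reqind = [17 18 19 | 42 43]  ->  one Isend of each index run plus one
persistent recv/send pair per owner; outlist[this_pe] = run length.

Persistent requests (hypre_MPI_Recv_init/Send_init) are used because the
same exchange pattern repeats on every matvec / transpose matvec.
*/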
/*--------------------------------------------------------------------------
* SetupSends
* This function will wait for all receives to complete.
*--------------------------------------------------------------------------*/
static void SetupSends(Matrix *mat, HYPRE_Int *inlist)
{
HYPRE_Int i, j, mype, npes;
hypre_MPI_Request *requests;
hypre_MPI_Status *statuses;
MPI_Comm comm = mat->comm;
hypre_MPI_Comm_rank(comm, &mype);
hypre_MPI_Comm_size(comm, &npes);
requests = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
statuses = hypre_TAlloc(hypre_MPI_Status, npes , HYPRE_MEMORY_HOST);
/* Determine size of and allocate sendbuf and sendind */
mat->sendlen = 0;
for (i=0; i<npes; i++)
mat->sendlen += inlist[i];
mat->sendbuf = NULL;
mat->sendind = NULL;
if (mat->sendlen)
{
mat->sendbuf = hypre_TAlloc(HYPRE_Real, mat->sendlen , HYPRE_MEMORY_HOST);
mat->sendind = hypre_TAlloc(HYPRE_Int, mat->sendlen , HYPRE_MEMORY_HOST);
}
j = 0;
mat->num_send = 0;
for (i=0; i<npes; i++)
{
if (inlist[i] != 0)
{
/* Post receive for the actual indices */
hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm,
&requests[mat->num_send]);
/* Set up the send */
hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 555, comm,
&mat->send_req[mat->num_send]);
/* Set up the receive for the transpose */
hypre_MPI_Recv_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 666, comm,
&mat->recv_req2[mat->num_send]);
mat->num_send++;
j += inlist[i];
}
}
hypre_MPI_Waitall(mat->num_send, requests, statuses);
hypre_TFree(requests,HYPRE_MEMORY_HOST);
hypre_TFree(statuses,HYPRE_MEMORY_HOST);
/* convert global indices to local indices */
/* these are all indices on this processor */
for (i=0; i<mat->sendlen; i++)
mat->sendind[i] -= mat->beg_row;
}
/*--------------------------------------------------------------------------
* MatrixComplete
*--------------------------------------------------------------------------*/
void MatrixComplete(Matrix *mat)
{
HYPRE_Int mype, npes;
HYPRE_Int *outlist, *inlist;
HYPRE_Int row, len, *ind;
HYPRE_Real *val;
hypre_MPI_Comm_rank(mat->comm, &mype);
hypre_MPI_Comm_size(mat->comm, &npes);
mat->recv_req = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->send_req = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->recv_req2 = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->send_req2 = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->statuses = hypre_TAlloc(hypre_MPI_Status, npes , HYPRE_MEMORY_HOST);
outlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
inlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
/* Create Numbering object */
mat->numb = NumberingCreate(mat, PARASAILS_NROWS);
SetupReceives(mat, mat->numb->num_ind - mat->numb->num_loc,
&mat->numb->local_to_global[mat->numb->num_loc], outlist);
hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT, mat->comm);
SetupSends(mat, inlist);
hypre_TFree(outlist,HYPRE_MEMORY_HOST);
hypre_TFree(inlist,HYPRE_MEMORY_HOST);
/* Convert to local indices */
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
NumberingGlobalToLocal(mat->numb, len, ind, ind);
}
}
/*--------------------------------------------------------------------------
* MatrixMatvec
* Can be done in place.
*--------------------------------------------------------------------------*/
void MatrixMatvec(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val, temp;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
/* Set up persistent communications */
/* Assumes MatrixComplete has been called */
/* Put components of x into the right outgoing buffers */
for (i=0; i<mat->sendlen; i++)
mat->sendbuf[i] = x[mat->sendind[i]];
hypre_MPI_Startall(mat->num_recv, mat->recv_req);
hypre_MPI_Startall(mat->num_send, mat->send_req);
/* Copy local part of x into top part of recvbuf */
for (i=0; i<num_local; i++)
mat->recvbuf[i] = x[i];
hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);
/* do the multiply */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(row,len,ind,val,temp,i) schedule(static)
#endif
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
temp = 0.0;
for (i=0; i<len; i++)
{
temp = temp + val[i] * mat->recvbuf[ind[i]];
}
y[row] = temp;
}
hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}
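/*
Calling-sequence sketch (added for illustration): the persistent requests
started above are created once in MatrixComplete, so a typical driver is

Matrix *mat = MatrixCreate(comm, beg_row, end_row);
... one MatrixSetRow() per local row ...
MatrixComplete(mat);        builds numbering + persistent send/recv
MatrixMatvec(mat, x, y);    repeatable; y = A*x

with x and y of length num_local = end_row - beg_row + 1. Per the header
comment, the multiply may be done in place (y aliasing x), since x is
copied into sendbuf/recvbuf before y is written.
*/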
void MatrixMatvecSerial(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val, temp;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
/* Set up persistent communications */
/* Assumes MatrixComplete has been called */
/* Put components of x into the right outgoing buffers */
for (i=0; i<mat->sendlen; i++)
mat->sendbuf[i] = x[mat->sendind[i]];
hypre_MPI_Startall(mat->num_recv, mat->recv_req);
hypre_MPI_Startall(mat->num_send, mat->send_req);
/* Copy local part of x into top part of recvbuf */
for (i=0; i<num_local; i++)
mat->recvbuf[i] = x[i];
hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);
/* do the multiply */
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
temp = 0.0;
for (i=0; i<len; i++)
{
temp = temp + val[i] * mat->recvbuf[ind[i]];
}
y[row] = temp;
}
hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}
/*--------------------------------------------------------------------------
* MatrixMatvecTrans
* Can be done in place.
*--------------------------------------------------------------------------*/
void MatrixMatvecTrans(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
/* Set up persistent communications */
/* Assumes MatrixComplete has been called */
/* Post receives for local parts of the solution y */
hypre_MPI_Startall(mat->num_send, mat->recv_req2);
/* initialize accumulator buffer to zero */
for (i=0; i<mat->recvlen+num_local; i++)
mat->recvbuf[i] = 0.0;
/* do the multiply */
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
for (i=0; i<len; i++)
{
mat->recvbuf[ind[i]] += val[i] * x[row];
}
}
/* Now can send nonlocal parts of solution to other procs */
hypre_MPI_Startall(mat->num_recv, mat->send_req2);
/* copy local part of solution into y */
for (i=0; i<num_local; i++)
y[i] = mat->recvbuf[i];
/* alternatively, loop over a wait any */
hypre_MPI_Waitall(mat->num_send, mat->recv_req2, mat->statuses);
/* add all the incoming partial sums to y */
for (i=0; i<mat->sendlen; i++)
y[mat->sendind[i]] += mat->sendbuf[i];
hypre_MPI_Waitall(mat->num_recv, mat->send_req2, mat->statuses);
}
|
3d.np.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#include <math.h> /* needed for ceil(), used by the ceild() macro below */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define myabs(x,y) (((x) > (y))? ((x)-(y)) : ((y)-(x)))
#if !defined(point)
#define point 7
#endif
#if point == 27
#define kernel(A) A[(t+1)%2][x][y][z] = 0.54 * (A[(t)%2][x][y][z]) + \
0.03 * (A[(t)%2][x][y][z-1] + A[(t)%2][x][y-1][z] + \
A[(t)%2][x][y+1][z] + A[(t)%2][x][y][z+1] + \
A[(t)%2][x-1][y][z] + A[(t)%2][x+1][y][z])+ \
0.01 * (A[(t)%2][x-1][y][z-1] + A[(t)%2][x-1][y-1][z] + \
A[(t)%2][x-1][y+1][z] + A[(t)%2][x-1][y][z+1] + \
A[(t)%2][x][y-1][z-1] + A[(t)%2][x][y+1][z-1] + \
A[(t)%2][x][y-1][z+1] + A[(t)%2][x][y+1][z+1] + \
A[(t)%2][x+1][y][z-1] + A[(t)%2][x+1][y-1][z] + \
A[(t)%2][x+1][y+1][z] + A[(t)%2][x+1][y][z+1])+ \
0.02 * (A[(t)%2][x-1][y-1][z-1] + A[(t)%2][x-1][y+1][z-1] + \
A[(t)%2][x-1][y-1][z+1] + A[(t)%2][x-1][y+1][z+1] + \
A[(t)%2][x+1][y-1][z-1] + A[(t)%2][x+1][y+1][z-1] + \
A[(t)%2][x+1][y-1][z+1] + A[(t)%2][x+1][y+1][z+1]);
#define XSLOPE 1
#define YSLOPE 1
#define ZSLOPE 1
#elif point == 7
#define kernel(A) A[(t+1)%2][x][y][z] = 0.64 * (A[t%2][x][y][z]) + \
0.06 * (A[t%2][x - 1][y][z] + A[t%2][x][y - 1][z] + \
A[t%2][x][y][z - 1] + A[t%2][x + 1][y][z] + \
A[t%2][x][y + 1][z] + A[t%2][x][y][z + 1]);
#define XSLOPE 1
#define YSLOPE 1
#define ZSLOPE 1
#endif
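/*
Added note (not in the original source): both stencil kernels are convex
combinations of the neighborhood -- the coefficients sum to 1:
27-point: 0.54 + 6*0.03 + 12*0.01 + 8*0.02 = 1.00
7-point:  0.64 + 6*0.06                    = 1.00
so a constant field is a fixed point of the update, a handy sanity check.
*/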
#ifdef CHECK
#define TOLERANCE 0
#endif
int main(int argc, char * argv[]) {
struct timeval start, end;
long int t, i, j, k;
int NX = atoi(argv[1]);
int NY = atoi(argv[2]);
int NZ = atoi(argv[3]);
int T = atoi(argv[4]);
int Bx = atoi(argv[5]);
int By = atoi(argv[6]);
int tb = atoi(argv[7]);
if(Bx<(2*XSLOPE+1) || By<(2*YSLOPE+1) || Bx>NX || By>NY || tb>min(((Bx-1)/2)/XSLOPE,((By-1)/2)/YSLOPE)){
return 0;
}
double (*A)[NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE] = (double (*)[NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE])malloc(sizeof(double)*(NX+2*XSLOPE)*(NY+2*YSLOPE)*(NZ+2*ZSLOPE)*2);
if(NULL == A) return 0;
#ifdef CHECK
double (*B)[NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE] = (double (*)[NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE])malloc(sizeof(double)*(NX+2*XSLOPE)*(NY+2*YSLOPE)*(NZ+2*ZSLOPE)*2);
if(NULL == B) return 0;
#endif
srand(100);
for (i = 0; i < NX+2*XSLOPE; i++) {
for (j = 0; j < NY+2*YSLOPE; j++) {
for (k = 0; k < NZ+2*ZSLOPE; k++) {
A[0][i][j][k] = (double) (1.0 * (rand() % 1024));
A[1][i][j][k] = 0;
#ifdef CHECK
B[0][i][j][k] = A[0][i][j][k];
B[1][i][j][k] = 0;
#endif
}
}
}
int bx = Bx-2*(tb*XSLOPE);
int by = By-2*(tb*YSLOPE);
int ix = Bx+bx;
int iy = By+by;
int xnb0 = ceild(NX,ix);
int ynb0 = ceild(NY,iy);
int xnb11 = ceild(NX-ix/2+1,ix) + 1;
int ynb12 = ceild(NY-iy/2+1,iy) + 1;
int xnb12 = xnb0;
int ynb11 = ynb0;
int xnb2 = max(xnb11,xnb0);
int ynb2 = max(ynb12,ynb0);
int nb1[2] = {xnb12 * ynb12, xnb11 * ynb11};
int nb02[2] = {xnb2 * ynb2, xnb0 * ynb0};
int xnb1[2] = {xnb12, xnb11};
int xnb02[2] = {xnb2, xnb0};
int xleft02[2] = {XSLOPE-bx, XSLOPE+(Bx-bx)/2};
int ybottom02[2] = {YSLOPE-by, YSLOPE+(By-by)/2};
int xleft11[2] = {XSLOPE+(Bx-bx)/2, XSLOPE - bx};
int ybottom11[2] = {YSLOPE-(By+by)/2, YSLOPE};
int xleft12[2] = {XSLOPE-(Bx+bx)/2, XSLOPE};
int ybottom12[2] = {YSLOPE+(By-by)/2, YSLOPE-by};
int level = 1;
int tt,n;
int x, y, z;
int ymin, ymax;
int xmin, xmax;
gettimeofday(&start,0);
for(tt = -tb; tt < T; tt += tb){
#pragma omp parallel for schedule(dynamic) private(xmin,xmax,ymin,ymax,t,x,y)
for(n = 0; n < nb02[level]; n++){
for(t = max(tt,0); t < min(tt + 2*tb, T); t++) {
xmin = max( XSLOPE, xleft02[level] + (n%xnb02[level]) * ix - tb*XSLOPE + myabs(t+1,tt+tb) * XSLOPE);
xmax = min(NX+XSLOPE, xleft02[level] + (n%xnb02[level]) * ix + bx + tb*XSLOPE - myabs(t+1,tt+tb) * XSLOPE);
ymin = max( YSLOPE, ybottom02[level] + (n/xnb02[level]) * iy - tb*YSLOPE + myabs(t+1,tt+tb) * YSLOPE);
ymax = min(NY+YSLOPE, ybottom02[level] + (n/xnb02[level]) * iy + by + tb*YSLOPE - myabs(t+1,tt+tb) * YSLOPE);
for(x = xmin; x < xmax; x++) {
for(y = ymin; y < ymax; y++) {
#pragma ivdep
#pragma vector always
for (z = ZSLOPE; z < NZ+ZSLOPE; z++) {
kernel(A);
}
}
}
}
}
#pragma omp parallel for schedule(dynamic) private(xmin,xmax,ymin,ymax,t,x,y)
for(n = 0; n < nb1[0] + nb1[1]; n++) {
for(t = tt + tb; t < min(tt + 2*tb, T); t++) {
if(n < nb1[level]) {
xmin = max( XSLOPE, xleft11[level] + (n%xnb1[level]) * ix - (t+1-tt-tb) * XSLOPE);
xmax = min(NX + XSLOPE, xleft11[level] + (n%xnb1[level]) * ix + bx + (t+1-tt-tb) * XSLOPE);
ymin = max( YSLOPE, ybottom11[level] + (n/xnb1[level]) * iy + (t+1-tt-tb) * YSLOPE);
ymax = min(NY + YSLOPE, ybottom11[level] + (n/xnb1[level]) * iy + By - (t+1-tt-tb) * YSLOPE);
}
else {
xmin = max( XSLOPE, xleft12[level] + ((n-nb1[level])%xnb1[1-level]) * ix + (t+1-tt-tb) * XSLOPE);
xmax = min(NX + XSLOPE, xleft12[level] + ((n-nb1[level])%xnb1[1-level]) * ix + Bx - (t+1-tt-tb) * XSLOPE);
ymin = max( YSLOPE, ybottom12[level] + ((n-nb1[level])/xnb1[1-level]) * iy - (t+1-tt-tb) * YSLOPE);
ymax = min(NY + YSLOPE, ybottom12[level] + ((n-nb1[level])/xnb1[1-level]) * iy + by + (t+1-tt-tb) * YSLOPE);
}
for(x = xmin; x < xmax; x++) {
for(y = ymin; y < ymax; y++) {
#pragma ivdep
#pragma vector always
for (z = ZSLOPE; z < NZ+ZSLOPE; z++) {
kernel(A);
}
}
}
}
}
level = 1 - level;
}
gettimeofday(&end,0);
printf("MStencil/s = %f\n", ((double)NX * NY * NZ * T) / (double)(end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) * 1.0e-6) / 1000000L);
#ifdef CHECK
for (t = 0; t < T; t++) {
for (x = XSLOPE; x < NX+XSLOPE; x++) {
for (y = YSLOPE; y < NY+YSLOPE; y++) {
for (z = ZSLOPE; z < NZ+ZSLOPE; z++) {
kernel(B);
}
}
}
}
for (i = XSLOPE; i < NX+XSLOPE; i++) {
for (j = YSLOPE; j < NY+YSLOPE; j++) {
for (k = ZSLOPE; k < NZ+ZSLOPE; k++) {
if(myabs(A[T%2][i][j][k], B[T%2][i][j][k]) > TOLERANCE)
printf("Naive[%d][%d][%d] = %f, Check() = %f: FAILED!\n", i, j, k, B[T%2][i][j][k], A[T%2][i][j][k]);
}
}
}
#endif
}
|
sparse_msg_restrict.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.14 $
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
/*--------------------------------------------------------------------------
* hypre_SparseMSGRestrictData data structure
*--------------------------------------------------------------------------*/
typedef struct
{
hypre_StructMatrix *R;
hypre_ComputePkg *compute_pkg;
hypre_Index cindex;
hypre_Index stride;
hypre_Index strideR;
HYPRE_Int time_index;
} hypre_SparseMSGRestrictData;
/*--------------------------------------------------------------------------
* hypre_SparseMSGRestrictCreate
*--------------------------------------------------------------------------*/
void *
hypre_SparseMSGRestrictCreate( )
{
hypre_SparseMSGRestrictData *restrict_data;
restrict_data = hypre_CTAlloc(hypre_SparseMSGRestrictData, 1);
(restrict_data -> time_index) = hypre_InitializeTiming("SparseMSGRestrict");
return (void *) restrict_data;
}
/*--------------------------------------------------------------------------
* hypre_SparseMSGRestrictSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SparseMSGRestrictSetup( void *restrict_vdata,
hypre_StructMatrix *R,
hypre_StructVector *r,
hypre_StructVector *rc,
hypre_Index cindex,
hypre_Index findex,
hypre_Index stride,
hypre_Index strideR )
{
hypre_SparseMSGRestrictData *restrict_data = restrict_vdata;
hypre_StructGrid *grid;
hypre_StructStencil *stencil;
hypre_ComputeInfo *compute_info;
hypre_ComputePkg *compute_pkg;
HYPRE_Int ierr = 0;
/*----------------------------------------------------------
* Set up the compute package
*----------------------------------------------------------*/
grid = hypre_StructVectorGrid(r);
stencil = hypre_StructMatrixStencil(R);
hypre_CreateComputeInfo(grid, stencil, &compute_info);
hypre_ComputeInfoProjectSend(compute_info, findex, stride);
hypre_ComputeInfoProjectRecv(compute_info, findex, stride);
hypre_ComputeInfoProjectComp(compute_info, cindex, stride);
hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(r), 1,
grid, &compute_pkg);
/*----------------------------------------------------------
* Set up the restrict data structure
*----------------------------------------------------------*/
(restrict_data -> R) = hypre_StructMatrixRef(R);
(restrict_data -> compute_pkg) = compute_pkg;
hypre_CopyIndex(cindex ,(restrict_data -> cindex));
hypre_CopyIndex(stride ,(restrict_data -> stride));
hypre_CopyIndex(strideR ,(restrict_data -> strideR));
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SparseMSGRestrict:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SparseMSGRestrict( void *restrict_vdata,
hypre_StructMatrix *R,
hypre_StructVector *r,
hypre_StructVector *rc )
{
HYPRE_Int ierr = 0;
hypre_SparseMSGRestrictData *restrict_data = restrict_vdata;
hypre_ComputePkg *compute_pkg;
hypre_IndexRef cindex;
hypre_IndexRef stride;
hypre_IndexRef strideR;
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_CommHandle *comm_handle;
hypre_BoxArrayArray *compute_box_aa;
hypre_BoxArray *compute_box_a;
hypre_Box *compute_box;
hypre_Box *R_dbox;
hypre_Box *r_dbox;
hypre_Box *rc_dbox;
HYPRE_Int Ri;
HYPRE_Int ri;
HYPRE_Int rci;
double *Rp0, *Rp1;
double *rp, *rp0, *rp1;
double *rcp;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_Index startc;
hypre_Index startR;
hypre_Index stridec;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int compute_i, fi, ci, j;
/*-----------------------------------------------------------------------
* Initialize some things.
*-----------------------------------------------------------------------*/
hypre_BeginTiming(restrict_data -> time_index);
compute_pkg = (restrict_data -> compute_pkg);
cindex = (restrict_data -> cindex);
stride = (restrict_data -> stride);
strideR = (restrict_data -> strideR);
stencil = hypre_StructMatrixStencil(R);
stencil_shape = hypre_StructStencilShape(stencil);
hypre_SetIndex(stridec, 1, 1, 1);
/*--------------------------------------------------------------------
* Restrict the residual.
*--------------------------------------------------------------------*/
fgrid = hypre_StructVectorGrid(r);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructVectorGrid(rc);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
rp = hypre_StructVectorData(r);
hypre_InitializeIndtComputations(compute_pkg, rp, &comm_handle);
compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
}
break;
case 1:
{
hypre_FinalizeIndtComputations(comm_handle);
compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
}
break;
}
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
r_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), fi);
rc_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(rc), ci);
Rp0 = hypre_StructMatrixBoxData(R, fi, 1) -
hypre_BoxOffsetDistance(R_dbox, stencil_shape[1]);
Rp1 = hypre_StructMatrixBoxData(R, fi, 0);
rp = hypre_StructVectorBoxData(r, fi);
rp0 = rp + hypre_BoxOffsetDistance(r_dbox, stencil_shape[0]);
rp1 = rp + hypre_BoxOffsetDistance(r_dbox, stencil_shape[1]);
rcp = hypre_StructVectorBoxData(rc, ci);
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
start = hypre_BoxIMin(compute_box);
hypre_StructMapFineToCoarse(start, cindex, stride, startc);
hypre_StructMapCoarseToFine(startc, cindex, strideR, startR);
hypre_BoxGetStrideSize(compute_box, stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(R), loop_size,
R_dbox, startR, strideR, Ri,
r_dbox, start, stride, ri,
rc_dbox, startc, stridec, rci);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ri,ri,rci) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ri, ri, rci)
{
rcp[rci] = rp[ri] + (Rp0[Ri] * rp0[ri] +
Rp1[Ri] * rp1[ri]);
}
hypre_BoxLoop3End(Ri, ri, rci);
}
}
}
/*-----------------------------------------------------------------------
* Return
*-----------------------------------------------------------------------*/
hypre_IncFLOPCount(4*hypre_StructVectorGlobalSize(rc));
hypre_EndTiming(restrict_data -> time_index);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SparseMSGRestrictDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SparseMSGRestrictDestroy( void *restrict_vdata )
{
HYPRE_Int ierr = 0;
hypre_SparseMSGRestrictData *restrict_data = restrict_vdata;
if (restrict_data)
{
hypre_StructMatrixDestroy(restrict_data -> R);
hypre_ComputePkgDestroy(restrict_data -> compute_pkg);
hypre_FinalizeTiming(restrict_data -> time_index);
hypre_TFree(restrict_data);
}
return ierr;
}
|
GridInit.c | #include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
// Generates randomized energy grid for each nuclide
// Note that this is done as part of initialization (serial), so
// rand() is used.
void generate_grids( NuclideGridPoint ** nuclide_grids,
long n_isotopes, long n_gridpoints ) {
for( long i = 0; i < n_isotopes; i++ )
for( long j = 0; j < n_gridpoints; j++ )
{
nuclide_grids[i][j].energy =((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].total_xs =((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].elastic_xs =((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].absorbtion_xs=((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].fission_xs =((double)rand()/(double)RAND_MAX);
nuclide_grids[i][j].nu_fission_xs=((double)rand()/(double)RAND_MAX);
}
}
// Verification version of this function (tighter control over RNG)
void generate_grids_v( NuclideGridPoint ** nuclide_grids,
long n_isotopes, long n_gridpoints ) {
for( long i = 0; i < n_isotopes; i++ )
for( long j = 0; j < n_gridpoints; j++ )
{
nuclide_grids[i][j].energy = rn_v();
nuclide_grids[i][j].total_xs = rn_v();
nuclide_grids[i][j].elastic_xs = rn_v();
nuclide_grids[i][j].absorbtion_xs= rn_v();
nuclide_grids[i][j].fission_xs = rn_v();
nuclide_grids[i][j].nu_fission_xs= rn_v();
}
}
// Sorts the nuclide grids by energy (lowest -> highest)
void sort_nuclide_grids( NuclideGridPoint ** nuclide_grids, long n_isotopes,
long n_gridpoints )
{
int (*cmp) (const void *, const void *);
cmp = NGP_compare;
for( long i = 0; i < n_isotopes; i++ )
qsort( nuclide_grids[i], n_gridpoints, sizeof(NuclideGridPoint),
cmp );
// error debug check
/*
for( int i = 0; i < n_isotopes; i++ )
{
printf("NUCLIDE %d ==============================\n", i);
for( int j = 0; j < n_gridpoints; j++ )
printf("E%d = %lf\n", j, nuclide_grids[i][j].energy);
}
*/
}
// Allocates unionized energy grid, and assigns union of energy levels
// from nuclide grids to it.
GridPoint * generate_energy_grid( long n_isotopes, long n_gridpoints,
NuclideGridPoint ** nuclide_grids) {
int mype = 0;
#ifdef MPI
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif
if( mype == 0 ) printf("Generating Unionized Energy Grid...\n");
long n_unionized_grid_points = n_isotopes*n_gridpoints;
int (*cmp) (const void *, const void *);
cmp = NGP_compare;
GridPoint * energy_grid = (GridPoint *)malloc( n_unionized_grid_points
* sizeof( GridPoint ) );
if( mype == 0 ) printf("Copying and Sorting all nuclide grids...\n");
NuclideGridPoint ** n_grid_sorted = gpmatrix( n_isotopes, n_gridpoints );
memcpy( n_grid_sorted[0], nuclide_grids[0], n_isotopes*n_gridpoints*
sizeof( NuclideGridPoint ) );
qsort( &n_grid_sorted[0][0], n_unionized_grid_points,
sizeof(NuclideGridPoint), cmp);
if( mype == 0 ) printf("Assigning energies to unionized grid...\n");
for( long i = 0; i < n_unionized_grid_points; i++ )
energy_grid[i].energy = n_grid_sorted[0][i].energy;
gpmatrix_free(n_grid_sorted);
int * full = (int *) malloc( n_isotopes * n_unionized_grid_points
* sizeof(int) );
if( full == NULL )
{
fprintf(stderr,"ERROR - Out Of Memory!\n");
exit(1);
}
for( long i = 0; i < n_unionized_grid_points; i++ )
energy_grid[i].xs_ptrs = &full[n_isotopes * i];
// debug error checking
/*
for( int i = 0; i < n_unionized_grid_points; i++ )
printf("E%d = %lf\n", i, energy_grid[i].energy);
*/
return energy_grid;
}
// Searches each nuclide grid for the closest energy level and assigns
// pointer from unionized grid to the correct spot in the nuclide grid.
// This process is time consuming, as the number of binary searches
// required is: binary searches = n_gridpoints * n_isotopes^2
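// For scale (illustrative numbers only): 100 isotopes with 10,000 grid
// points each would give 10,000 * 100^2 = 1e8 binary searches, which is
// why the outer loop below is parallelized with OpenMP.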
void set_grid_ptrs( GridPoint * energy_grid, NuclideGridPoint ** nuclide_grids,
long n_isotopes, long n_gridpoints )
{
int mype = 0;
#ifdef MPI
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif
if( mype == 0 ) printf("Assigning pointers to Unionized Energy Grid...\n");
#pragma omp parallel for default(none) \
shared( energy_grid, nuclide_grids, n_isotopes, n_gridpoints, mype )
for( long i = 0; i < n_isotopes * n_gridpoints ; i++ )
{
double quarry = energy_grid[i].energy;
if( INFO && mype == 0 && omp_get_thread_num() == 0 && i % 200 == 0 )
printf("\rAligning Unionized Grid...(%.0lf%% complete)",
100.0 * (double) i / (n_isotopes*n_gridpoints /
omp_get_num_threads()) );
for( long j = 0; j < n_isotopes; j++ )
{
// j is the nuclide i.d.
// log n binary search
energy_grid[i].xs_ptrs[j] =
binary_search( nuclide_grids[j], quarry, n_gridpoints);
}
}
if( mype == 0 ) printf("\n");
//test
/*
for( int i=0; i < n_isotopes * n_gridpoints; i++ )
for( int j = 0; j < n_isotopes; j++ )
printf("E = %.4lf\tNuclide %d->%p->%.4lf\n",
energy_grid[i].energy,
j,
energy_grid[i].xs_ptrs[j],
(energy_grid[i].xs_ptrs[j])->energy
);
*/
}
|
level.c | /*******************************************************************************
Copyright (c) 2016 Advanced Micro Devices, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
#ifdef USE_MPI
#include <mpi.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#include "timers.h"
#include "defines.h"
#include "level.h"
#include "operators.h"
//------------------------------------------------------------------------------------------------------------------------------
void print_communicator(int printSendRecv, int rank, int level, communicator_type *comm){
int i;
printf("rank=%2d level=%d ",rank,level);
if(printSendRecv & 0x1){
printf("num_sends=%2d ",comm->num_sends);
printf("send_ranks=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_ranks[i]);printf("] ");
printf("send_sizes=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_sizes[i]);printf("] ");
printf("send_buffers=[ ");for(i=0;i<comm->num_sends;i++)printf("%08lx ",(uint64_t)comm->send_buffers[i]);printf("] ");
for(i=0;i<comm->num_blocks[0];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[0][i].dim.i,comm->blocks[0][i].dim.j,comm->blocks[0][i].dim.k,comm->blocks[0][i].read.i,comm->blocks[0][i].read.j,comm->blocks[0][i].read.k,comm->blocks[0][i].read.jStride,comm->blocks[0][i].read.kStride,comm->blocks[0][i].write.i,comm->blocks[0][i].write.j,comm->blocks[0][i].write.k,comm->blocks[0][i].write.jStride,comm->blocks[0][i].write.kStride);
printf("\n");
}
if(printSendRecv & 0x2){
for(i=0;i<comm->num_blocks[1];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[1][i].dim.i,comm->blocks[1][i].dim.j,comm->blocks[1][i].dim.k,comm->blocks[1][i].read.i,comm->blocks[1][i].read.j,comm->blocks[1][i].read.k,comm->blocks[1][i].read.jStride,comm->blocks[1][i].read.kStride,comm->blocks[1][i].write.i,comm->blocks[1][i].write.j,comm->blocks[1][i].write.k,comm->blocks[1][i].write.jStride,comm->blocks[1][i].write.kStride);
printf("\n");
}
if(printSendRecv & 0x4){
printf("num_recvs=%2d ",comm->num_recvs);
printf("recv_ranks=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_ranks[i]);printf("] ");
printf("recv_sizes=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_sizes[i]);printf("] ");
printf("recv_buffers=[ ");for(i=0;i<comm->num_recvs;i++)printf("%08lx ",(uint64_t)comm->recv_buffers[i]);printf("] ");
for(i=0;i<comm->num_blocks[2];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[2][i].dim.i,comm->blocks[2][i].dim.j,comm->blocks[2][i].dim.k,comm->blocks[2][i].read.i,comm->blocks[2][i].read.j,comm->blocks[2][i].read.k,comm->blocks[2][i].read.jStride,comm->blocks[2][i].read.kStride,comm->blocks[2][i].write.i,comm->blocks[2][i].write.j,comm->blocks[2][i].write.k,comm->blocks[2][i].write.jStride,comm->blocks[2][i].write.kStride);
printf("\n");
}
fflush(stdout);
}
//------------------------------------------------------------------------------------------------------------------------------
typedef struct {
int sendRank;
int sendBoxID;
int sendBox;
int sendDir;
int recvRank;
int recvBoxID;
int recvBox;
} GZ_type;
int qsortGZ(const void *a, const void*b){
GZ_type *gza = (GZ_type*)a;
GZ_type *gzb = (GZ_type*)b;
// by convention, MPI buffers are first sorted by sendRank
if(gza->sendRank < gzb->sendRank)return(-1);
if(gza->sendRank > gzb->sendRank)return( 1);
// then by sendBoxID
if(gza->sendBoxID < gzb->sendBoxID)return(-1);
if(gza->sendBoxID > gzb->sendBoxID)return( 1);
// and finally by the direction sent
if(gza->sendDir < gzb->sendDir)return(-1);
if(gza->sendDir > gzb->sendDir)return( 1);
return(0);
}
int qsortInt(const void *a, const void *b){
int *ia = (int*)a;
int *ib = (int*)b;
if(*ia < *ib)return(-1);
if(*ia > *ib)return( 1);
return( 0);
}
int qsortBlock(const void *a, const void *b){
blockCopy_type *ba = (blockCopy_type*)a;
blockCopy_type *bb = (blockCopy_type*)b;
if(ba->write.box >= 0){
// sort by box...
if(ba->write.box < bb->write.box)return(-1);
if(ba->write.box > bb->write.box)return( 1);
// now sort by k
if(ba->write.k < bb->write.k )return(-1);
if(ba->write.k > bb->write.k )return( 1);
// now sort by j
if(ba->write.j < bb->write.j )return(-1);
if(ba->write.j > bb->write.j )return( 1);
// now sort by i
if(ba->write.i < bb->write.i )return(-1);
if(ba->write.i > bb->write.i )return( 1);
}else if(ba->read.box >= 0){
// sort by box...
if(ba->read.box < bb->read.box )return(-1);
if(ba->read.box > bb->read.box )return( 1);
// now sort by k
if(ba->read.k < bb->read.k )return(-1);
if(ba->read.k > bb->read.k )return( 1);
// now sort by j
if(ba->read.j < bb->read.j )return(-1);
if(ba->read.j > bb->read.j )return( 1);
// now sort by i
if(ba->read.i < bb->read.i )return(-1);
if(ba->read.i > bb->read.i )return( 1);
}
return( 0);
}
//------------------------------------------------------------------------------------------------------------------------------
void decompose_level_lex(int *rank_of_box, int idim, int jdim, int kdim, int ranks){
// simple lexicographical decomposition of the domain (i-j-k ordering)
// load balancing is easily realized
// unfortunately, each process will likely receive one or two long pencils of boxes.
// as such, the resultant surface:volume ratio will likely be poor
int boxes = idim*jdim*kdim;
int i,j,k;
for(k=0;k<kdim;k++){
for(j=0;j<jdim;j++){
for(i=0;i<idim;i++){
int b = k*jdim*idim + j*idim + i;
rank_of_box[b] = ((uint64_t)ranks*(uint64_t)b)/(uint64_t)boxes; // ranks*b can be nearly ranks*boxes and can overflow int; hence the uint64_t casts
}}}
}
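// worked example (illustrative only): boxes=9, ranks=4 gives
//   b           = 0 1 2 3 4 5 6 7 8
//   rank_of_box = 0 0 0 1 1 2 2 3 3   (i.e. floor(4*b/9))
// so each rank receives either two or three boxes; load imbalance is at most one box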
//---------------------------------------------------------------------------------------------------------------------------------------------------
void decompose_level_bisection_special(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int rank_lo, int ranks){
// if possible, recursively partition the domain by a prime number (e.g. try to partition a 9^3 array into 3 equal pieces instead of 5x9^2 and 4x9^2)
// if not, default to simple bisection
// this function should ensure that each process receives a compact rectahedral collection of boxes
// however, load imbalance can occur
// the choice of whether to try and partition with the largest prime or smallest prime first is up to the user
#define numPrimes 13
//int primes[numPrimes] = {41,37,31,29,23,19,17,13,11,7,5,3,2};
int primes[numPrimes] = {2,3,5,7,11,13,17,19,23,29,31,37,41};
int i,j,k,p,f,ff;
// base case, no further recursion...
if( (ranks==1)|| ((idim==1)&&(jdim==1)&&(kdim==1)) ){
for(i=ilo;i<ilo+idim;i++){
for(j=jlo;j<jlo+jdim;j++){
for(k=klo;k<klo+kdim;k++){
int b = i + j*jStride + k*kStride;
rank_of_box[b] = rank_lo;
}}}
return;
}
// special cases where the problem size perfectly matches the number of processes (even when not a power of 2)...
for(p=0;p<numPrimes;p++){
f=primes[p];
if( (kdim>=idim)&&(kdim>=jdim) ){if( (kdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo+ff*kdim/f,idim,jdim,kdim/f,rank_lo+ff*ranks/f,ranks/f);return;}}
if( (jdim>=idim)&&(jdim>=kdim) ){if( (jdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo+ff*jdim/f,klo,idim,jdim/f,kdim,rank_lo+ff*ranks/f,ranks/f);return;}}
if( (idim>=jdim)&&(idim>=kdim) ){if( (idim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo+ff*idim/f,jlo,klo,idim/f,jdim,kdim,rank_lo+ff*ranks/f,ranks/f);return;}}
}
// try and bisect the domain in the i-dimension
if( (idim>=jdim)&&(idim>=kdim) ){
int dim0 = (int)(0.5*(double)idim + 0.50);
int dim1 = idim-dim0;
int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)idim );
int r1 = ranks-r0;
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo ,jlo,klo,dim0,jdim,kdim,rank_lo ,r0); // lo
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo+dim0,jlo,klo,dim1,jdim,kdim,rank_lo+r0,r1); // hi
return;
}
// try and bisect the domain in the j-dimension
if( (jdim>=idim)&&(jdim>=kdim) ){
int dim0 = (int)(0.5*(double)jdim + 0.50);
int dim1 = jdim-dim0;
int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)jdim );
int r1 = ranks-r0;
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo ,klo,idim,dim0,kdim,rank_lo ,r0); // lo
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo+dim0,klo,idim,dim1,kdim,rank_lo+r0,r1); // hi
return;
}
// try and bisect the domain in the k-dimension
if( (kdim>=idim)&&(kdim>=jdim) ){
int dim0 = (int)(0.5*(double)kdim + 0.50);
int dim1 = kdim-dim0;
int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)kdim );
int r1 = ranks-r0;
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo ,idim,jdim,dim0,rank_lo ,r0); // lo
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo+dim0,idim,jdim,dim1,rank_lo+r0,r1); // hi
return;
}
fprintf(stderr,"decompose_level_bisection_special failed !!!\n");exit(0);
}
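// worked example (illustrative only): a 9x9x9 grid of boxes on 27 ranks is first
// split by the prime 3 in k (three 9x9x3 subdomains on 9 ranks each), then
// recursively by 3 in j and in i, yielding 27 compact 3x3x3 subdomains, exactly
// one per rank, instead of the uneven pieces plain bisection would produce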
//---------------------------------------------------------------------------------------------------------------------------------------------------
void decompose_level_bisection(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int ranks, int sfc_offset, int sfc_max_length){
// base case...
if( (idim==1) && (jdim==1) && (kdim==1) ){
int b = ilo + jlo*jStride + klo*kStride;
rank_of_box[b] = ((uint64_t)ranks*(uint64_t)sfc_offset)/(uint64_t)sfc_max_length; // sfc_max_length is the precomputed maximum length
return;
}
// try and bisect the domain in the i-dimension
if( (idim>=jdim)&&(idim>=kdim) ){
int dim0 = (int)(0.5*(double)idim + 0.50);
int dim1 = idim-dim0;
int sfc_delta = dim0*jdim*kdim;
decompose_level_bisection(rank_of_box,jStride,kStride,ilo ,jlo,klo,dim0,jdim,kdim,ranks,sfc_offset ,sfc_max_length); // lo
decompose_level_bisection(rank_of_box,jStride,kStride,ilo+dim0,jlo,klo,dim1,jdim,kdim,ranks,sfc_offset+sfc_delta,sfc_max_length); // hi
return;
}
// try and bisect the domain in the j-dimension
if( (jdim>=idim)&&(jdim>=kdim) ){
int dim0 = (int)(0.5*(double)jdim + 0.50);
int dim1 = jdim-dim0;
int sfc_delta = idim*dim0*kdim;
decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo ,klo,idim,dim0,kdim,ranks,sfc_offset ,sfc_max_length); // lo
decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo+dim0,klo,idim,dim1,kdim,ranks,sfc_offset+sfc_delta,sfc_max_length); // hi
return;
}
// try and bisect the domain in the k-dimension
if( (kdim>=idim)&&(kdim>=jdim) ){
int dim0 = (int)(0.5*(double)kdim + 0.50);
int dim1 = kdim-dim0;
int sfc_delta = idim*jdim*dim0;
decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo,klo ,idim,jdim,dim0,ranks,sfc_offset ,sfc_max_length); // lo
decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo,klo+dim0,idim,jdim,dim1,ranks,sfc_offset+sfc_delta,sfc_max_length); // hi
return;
}
// failure...
fprintf(stderr,"decompose_level_bisection failed !!!\n");exit(0);
}
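// note (illustrative): the recursion enumerates boxes along an implicit space
// filling curve and the base case maps curve position to rank via
// rank = floor(ranks*sfc_offset/sfc_max_length); e.g. ranks=4, sfc_max_length=9
// assigns offsets {0,1,2}->0, {3,4}->1, {5,6}->2, {7,8}->3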
//---------------------------------------------------------------------------------------------------------------------------------------------------
// Given a bounding box (idim,jdim,kdim), use a Z-morton Space Filling Curve (SFC) to assign the boxes within the (boxes_in_i,boxes_in_j,boxes_in_k) valid region of the domain
// sfc_offset is the current offset within the space filling curve (starts with 0)
// this function returns the new offset based on how many actual boxes it found within (ilo,jlo,klo) + (idim,jdim,kdim)
// sfc_max_length is the maximum length of the SFC. Note, if this length exceeds boxes_in_i*boxes_in_j*boxes_in_k, then some processes will receive no work
int decompose_level_zmort(int *rank_of_box, int boxes_in_i, int boxes_in_j, int boxes_in_k, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int ranks, int sfc_offset, int sfc_max_length){
// invalid cases...
if(idim<1)return(sfc_offset);
if(jdim<1)return(sfc_offset);
if(kdim<1)return(sfc_offset);
if(ilo <0)return(sfc_offset);
if(jlo <0)return(sfc_offset);
if(klo <0)return(sfc_offset);
// base case...
if( (idim==1) && (jdim==1) && (kdim==1) ){
if( (ilo<boxes_in_i) && (jlo<boxes_in_j) && (klo<boxes_in_k) ){
// deemed a valid box (could be augmented for irregular domains)
int b = ilo + jlo*boxes_in_i + klo*boxes_in_i*boxes_in_j;
rank_of_box[b] = ((uint64_t)ranks*(uint64_t)(sfc_offset))/(uint64_t)sfc_max_length; // sfc_max_length is the precomputed maximum length
return(sfc_offset+1);
}
return(sfc_offset); // region outside valid domain; sfc_offset is unchanged
}
// bisect in 3D...
int imid = ilo + (idim/2);
int jmid = jlo + (jdim/2);
int kmid = klo + (kdim/2);
sfc_offset=decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,ilo ,jlo ,klo , idim/2, jdim/2, kdim/2,ranks,sfc_offset,sfc_max_length);
sfc_offset=decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,imid,jlo ,klo ,idim-idim/2, jdim/2, kdim/2,ranks,sfc_offset,sfc_max_length);
sfc_offset=decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,ilo ,jmid,klo , idim/2,jdim-jdim/2, kdim/2,ranks,sfc_offset,sfc_max_length);
sfc_offset=decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,imid,jmid,klo ,idim-idim/2,jdim-jdim/2, kdim/2,ranks,sfc_offset,sfc_max_length);
sfc_offset=decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,ilo ,jlo ,kmid, idim/2, jdim/2,kdim-kdim/2,ranks,sfc_offset,sfc_max_length);
sfc_offset=decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,imid,jlo ,kmid,idim-idim/2, jdim/2,kdim-kdim/2,ranks,sfc_offset,sfc_max_length);
sfc_offset=decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,ilo ,jmid,kmid, idim/2,jdim-jdim/2,kdim-kdim/2,ranks,sfc_offset,sfc_max_length);
sfc_offset=decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,imid,jmid,kmid,idim-idim/2,jdim-jdim/2,kdim-kdim/2,ranks,sfc_offset,sfc_max_length);
return(sfc_offset);
}
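// worked example (illustrative only): for a 2x2x2 region the eight recursive calls
// visit boxes in the Z-morton order
//   (0,0,0),(1,0,0),(0,1,0),(1,1,0),(0,0,1),(1,0,1),(0,1,1),(1,1,1)
// so sfc_offset simply counts 0..7 along the curve; octants of a padded bounding
// box that fall outside the valid domain return sfc_offset unchanged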
//------------------------------------------------------------------------------------------------------------------------------
//int decompose_level_hilbert(int *rank_of_box, int boxes_in_i, int boxes_in_j, int boxes_in_k, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int ranks, int sfc_offset, int sfc_max_length){
// implements a 3D hilbert curve on the non-power of two domain using a power of two bounding box
//}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void print_decomposition(level_type *level){
if(level->my_rank!=0)return;
printf("\n");
int i,j,k;
int jStride = level->boxes_in.i;
int kStride = level->boxes_in.i*level->boxes_in.j;
for(k=level->boxes_in.k-1;k>=0;k--){ // (i,j,k)=(0,0,0) is bottom left corner
for(j=level->boxes_in.j-1;j>=0;j--){ // (i,j)=(0,0) is bottom left corner
for(i=0;i<j;i++)printf(" ");
for(i=0;i<level->boxes_in.i;i++){
int b = i + j*jStride + k*kStride;
printf("%4d ",level->rank_of_box[b]);
}printf("\n");
}printf("\n\n");
}
fflush(stdout);
}
//------------------------------------------------------------------------------------------------------------------------------
// append the specified block (logical region) to the current list of blocks
// each block may be tiled to...
// - create more parallelism across the list of blocks
// - limit parallelism within a block
// - limit the memory requirements for each block
#ifndef BLOCK_LIST_MIN_SIZE
#define BLOCK_LIST_MIN_SIZE 1000
#endif
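// the block list grows geometrically: the first allocation holds BLOCK_LIST_MIN_SIZE
// entries and every subsequent realloc doubles the capacity (see the append loop
// below), giving amortized O(1) appends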
void append_block_to_list(blockCopy_type ** blocks, int *allocated_blocks, int *num_blocks,
int dim_i, int dim_j, int dim_k,
int read_box, double* read_ptr, int read_i, int read_j, int read_k, int read_jStride, int read_kStride, int read_scale,
int write_box, double* write_ptr, int write_i, int write_j, int write_k, int write_jStride, int write_kStride, int write_scale,
int blockcopy_tile_i, int blockcopy_tile_j, int blockcopy_tile_k,
int subtype
){
// Take a dim_i x dim_j x dim_k iteration space and tile it into smaller blocks of size blockcopy_tile_i x blockcopy_tile_j x blockcopy_tile_k
// This increases the number of blockCopies in the ghost zone exchange and thereby increases the thread-level parallelism
#if 0
// use recursive (z-mort) ordering of tiles in order to improve locality on deep memory hierarchies...
int doRecursion=0;
if(dim_i > blockcopy_tile_i)doRecursion=1;
if(dim_j > blockcopy_tile_j)doRecursion=1;
if(dim_k > blockcopy_tile_k)doRecursion=1;
if( read_scale != 1)doRecursion=0; // disable recursion for restriction
if(write_scale != 1)doRecursion=0; // disable recursion for interpolation
if(doRecursion){
int mid_i = (dim_i + 1)/2;
int mid_j = (dim_j + 1)/2;
int mid_k = (dim_k + 1)/2;
mid_i = blockcopy_tile_i*( (mid_i+blockcopy_tile_i-1)/blockcopy_tile_i);
mid_j = blockcopy_tile_j*( (mid_j+blockcopy_tile_j-1)/blockcopy_tile_j);
mid_k = blockcopy_tile_k*( (mid_k+blockcopy_tile_k-1)/blockcopy_tile_k);
if(mid_i>dim_i)mid_i=dim_i;
if(mid_j>dim_j)mid_j=dim_j;
if(mid_k>dim_k)mid_k=dim_k;
append_block_to_list(blocks,allocated_blocks,num_blocks, mid_i, mid_j, mid_k,
read_box, read_ptr, read_i , read_j , read_k , read_jStride, read_kStride, read_scale,
write_box,write_ptr,write_i ,write_j ,write_k ,write_jStride,write_kStride,write_scale,
blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
append_block_to_list(blocks,allocated_blocks,num_blocks,dim_i-mid_i, mid_j, mid_k,
read_box, read_ptr, read_i+mid_i, read_j , read_k , read_jStride, read_kStride, read_scale,
write_box,write_ptr,write_i+mid_i,write_j ,write_k ,write_jStride,write_kStride,write_scale,
blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
append_block_to_list(blocks,allocated_blocks,num_blocks, mid_i,dim_j-mid_j, mid_k,
read_box, read_ptr, read_i , read_j+mid_j, read_k , read_jStride, read_kStride, read_scale,
write_box,write_ptr,write_i ,write_j+mid_j,write_k ,write_jStride,write_kStride,write_scale,
blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
append_block_to_list(blocks,allocated_blocks,num_blocks,dim_i-mid_i,dim_j-mid_j, mid_k,
read_box, read_ptr, read_i+mid_i, read_j+mid_j, read_k , read_jStride, read_kStride, read_scale,
write_box,write_ptr,write_i+mid_i,write_j+mid_j,write_k ,write_jStride,write_kStride,write_scale,
blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
append_block_to_list(blocks,allocated_blocks,num_blocks, mid_i, mid_j,dim_k-mid_k,
read_box, read_ptr, read_i , read_j , read_k+mid_k, read_jStride, read_kStride, read_scale,
write_box,write_ptr,write_i ,write_j ,write_k+mid_k,write_jStride,write_kStride,write_scale,
blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
append_block_to_list(blocks,allocated_blocks,num_blocks,dim_i-mid_i, mid_j,dim_k-mid_k,
read_box, read_ptr, read_i+mid_i, read_j , read_k+mid_k, read_jStride, read_kStride, read_scale,
write_box,write_ptr,write_i+mid_i,write_j ,write_k+mid_k,write_jStride,write_kStride,write_scale,
blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
append_block_to_list(blocks,allocated_blocks,num_blocks, mid_i,dim_j-mid_j,dim_k-mid_k,
read_box, read_ptr, read_i , read_j+mid_j, read_k+mid_k, read_jStride, read_kStride, read_scale,
write_box,write_ptr,write_i ,write_j+mid_j,write_k+mid_k,write_jStride,write_kStride,write_scale,
blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
append_block_to_list(blocks,allocated_blocks,num_blocks,dim_i-mid_i,dim_j-mid_j,dim_k-mid_k,
read_box, read_ptr, read_i+mid_i, read_j+mid_j, read_k+mid_k, read_jStride, read_kStride, read_scale,
write_box,write_ptr,write_i+mid_i,write_j+mid_j,write_k+mid_k,write_jStride,write_kStride,write_scale,
blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
return;
}
#endif
// read_/write_scale are used to stride appropriately when the read and write loop iteration spaces are different
// ghostZone: read_scale=1, write_scale=1
// interpolation: read_scale=1, write_scale=2
// restriction: read_scale=2, write_scale=1
// FIX... dim_i,j,k -> read_dim_i,j,k, write_dim_i,j,k
int ii,jj,kk;
for(kk=0;kk<dim_k;kk+=blockcopy_tile_k){
for(jj=0;jj<dim_j;jj+=blockcopy_tile_j){
for(ii=0;ii<dim_i;ii+=blockcopy_tile_i){
int dim_k_mod = dim_k-kk;if(dim_k_mod>blockcopy_tile_k)dim_k_mod=blockcopy_tile_k;
int dim_j_mod = dim_j-jj;if(dim_j_mod>blockcopy_tile_j)dim_j_mod=blockcopy_tile_j;
int dim_i_mod = dim_i-ii;if(dim_i_mod>blockcopy_tile_i)dim_i_mod=blockcopy_tile_i;
if(*num_blocks >= *allocated_blocks){
int oldSize = *allocated_blocks;
if(*allocated_blocks == 0){*allocated_blocks=BLOCK_LIST_MIN_SIZE;*blocks=(blockCopy_type*) malloc( (*allocated_blocks)*sizeof(blockCopy_type));}
else{*allocated_blocks*=2; *blocks=(blockCopy_type*)realloc((void*)(*blocks),(*allocated_blocks)*sizeof(blockCopy_type));}
if(*blocks == NULL){fprintf(stderr,"realloc failed - append_block_to_list (%d -> %d)\n",oldSize,*allocated_blocks);exit(0);}
}
(*blocks)[*num_blocks].subtype = subtype;
(*blocks)[*num_blocks].dim.i = dim_i_mod;
(*blocks)[*num_blocks].dim.j = dim_j_mod;
(*blocks)[*num_blocks].dim.k = dim_k_mod;
(*blocks)[*num_blocks].read.box = read_box;
(*blocks)[*num_blocks].read.ptr = read_ptr;
(*blocks)[*num_blocks].read.i = read_i + read_scale*ii;
(*blocks)[*num_blocks].read.j = read_j + read_scale*jj;
(*blocks)[*num_blocks].read.k = read_k + read_scale*kk;
(*blocks)[*num_blocks].read.jStride = read_jStride;
(*blocks)[*num_blocks].read.kStride = read_kStride;
(*blocks)[*num_blocks].write.box = write_box;
(*blocks)[*num_blocks].write.ptr = write_ptr;
(*blocks)[*num_blocks].write.i = write_i + write_scale*ii;
(*blocks)[*num_blocks].write.j = write_j + write_scale*jj;
(*blocks)[*num_blocks].write.k = write_k + write_scale*kk;
(*blocks)[*num_blocks].write.jStride = write_jStride;
(*blocks)[*num_blocks].write.kStride = write_kStride;
(*num_blocks)++;
}}}
}
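// usage sketch (illustrative only): appending a 16x16x16 region with
// blockcopy_tile_i/j/k = 8 produces eight 8x8x8 blockCopy_type entries, each with
// its read/write coordinates offset by the tile origin (ii,jj,kk) scaled by
// read_scale/write_scale respectively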
//----------------------------------------------------------------------------------------------------------------------------------------------------
// create a mini program that traverses the domain boundary intersecting with this process's boxes
// This includes faces, corners, and edges
void build_boundary_conditions(level_type *level, int shape){
level->boundary_condition.blocks[shape] = NULL; // default for periodic (i.e. no BC's)
level->boundary_condition.num_blocks[shape] = 0; // default for periodic (i.e. no BC's)
level->boundary_condition.allocated_blocks[shape] = 0; // default for periodic (i.e. no BC's)
if(level->boundary_condition.type == BC_PERIODIC)return;
//int faces[27] = {0,0,0,0,1,0,0,0,0, 0,1,0,1,0,1,0,1,0, 0,0,0,0,1,0,0,0,0};
int edges[27] = {0,1,0,1,0,1,0,1,0, 1,0,1,0,0,0,1,0,1, 0,1,0,1,0,1,0,1,0};
int corners[27] = {1,0,1,0,0,0,1,0,1, 0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,1,0,1};
int box, di,dj,dk;
for(box=0;box<level->num_my_boxes;box++){ // traverse my list of boxes...
for(dk=-1;dk<=1;dk++){ // for each box, examine its 26 neighbors...
for(dj=-1;dj<=1;dj++){
for(di=-1;di<=1;di++){
int dir = 13+di+3*dj+9*dk; // face/edge/corner of *THIS* box (not the domain)
// determine if this region (box's di,dj,dk ghost zone) is outside of the domain
int regionIsOutside=0;
int normal = 13; // normal effectively defines the normal vector to the *DOMAIN* for this region...
// this addition is necessary for linearly interpolated BC's as a box's corner is not necessarily a domain's corner
int myBox_i = level->my_boxes[box].low.i / level->box_dim;
int myBox_j = level->my_boxes[box].low.j / level->box_dim;
int myBox_k = level->my_boxes[box].low.k / level->box_dim;
int neighborBox_i = ( myBox_i + di );
int neighborBox_j = ( myBox_j + dj );
int neighborBox_k = ( myBox_k + dk );
if( neighborBox_i < 0 ){regionIsOutside=1;normal-=1;}
if( neighborBox_j < 0 ){regionIsOutside=1;normal-=3;}
if( neighborBox_k < 0 ){regionIsOutside=1;normal-=9;}
if( neighborBox_i >=level->boxes_in.i ){regionIsOutside=1;normal+=1;}
if( neighborBox_j >=level->boxes_in.j ){regionIsOutside=1;normal+=3;}
if( neighborBox_k >=level->boxes_in.k ){regionIsOutside=1;normal+=9;}
// calculate ghost zone region size and coordinates relative to the first non-ghost zone element (0,0,0)
int block_i=-1,block_j=-1,block_k=-1;
int dim_i=-1, dim_j=-1, dim_k=-1;
switch(di){
case -1:dim_i=level->box_ghosts;block_i=0-level->box_ghosts;break;
case 0:dim_i=level->box_dim; block_i=0; break;
case 1:dim_i=level->box_ghosts;block_i=0+level->box_dim; break;
}
switch(dj){
case -1:dim_j=level->box_ghosts;block_j=0-level->box_ghosts;break;
case 0:dim_j=level->box_dim; block_j=0; break;
case 1:dim_j=level->box_ghosts;block_j=0+level->box_dim; break;
}
switch(dk){
case -1:dim_k=level->box_ghosts;block_k=0-level->box_ghosts;break;
case 0:dim_k=level->box_dim; block_k=0; break;
case 1:dim_k=level->box_ghosts;block_k=0+level->box_dim; break;
}
// use regionIsOutside to short circuit logic and cull unnecessary regions...
switch(shape){
case STENCIL_SHAPE_STAR: if(edges[dir]||corners[dir])regionIsOutside=0;break; // star-shaped stencils don't need BC's enforced on corners or edges
case STENCIL_SHAPE_NO_CORNERS:if( corners[dir])regionIsOutside=0;break; // these stencils don't need BC's enforced on corners
}
// default tile sizes...
// NOTE, BC's may never tile smaller than the ghost zone depth
int blockcopy_i = (BLOCKCOPY_TILE_I < level->box_ghosts) ? level->box_ghosts : BLOCKCOPY_TILE_I;
int blockcopy_j = (BLOCKCOPY_TILE_J < level->box_ghosts) ? level->box_ghosts : BLOCKCOPY_TILE_J;
int blockcopy_k = (BLOCKCOPY_TILE_K < level->box_ghosts) ? level->box_ghosts : BLOCKCOPY_TILE_K;
#if 0
// 2D tiling of faces
// 1D tiling of edges
// corners use defaults
switch(dir){
case 1:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k=10000;break; // i edge
case 3:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k=10000;break; // j edge
case 4:blockcopy_i= 8;blockcopy_j= 8;blockcopy_k=10000;break; // ij face
case 5:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k=10000;break; // j edge
case 7:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k=10000;break; // i edge
case 9:blockcopy_i=10000;blockcopy_j=10000;blockcopy_k= 8;break; // k edge
case 10:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k= 8;break; // ik face
case 11:blockcopy_i=10000;blockcopy_j=10000;blockcopy_k= 8;break; // k edge
case 12:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k= 8;break; // jk face
case 14:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k= 8;break; // jk face
case 15:blockcopy_i=10000;blockcopy_j=10000;blockcopy_k= 8;break; // k edge
case 16:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k= 8;break; // ik face
case 17:blockcopy_i=10000;blockcopy_j=10000;blockcopy_k= 8;break; // k edge
case 19:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k=10000;break; // i edge
case 21:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k=10000;break; // j edge
case 22:blockcopy_i= 8;blockcopy_j= 8;blockcopy_k=10000;break; // ij face
case 23:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k=10000;break; // j edge
case 25:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k=10000;break; // i edge
}
#endif
if(regionIsOutside){
append_block_to_list(&(level->boundary_condition.blocks[shape]),&(level->boundary_condition.allocated_blocks[shape]),&(level->boundary_condition.num_blocks[shape]),
/* dim.i = */ dim_i,
/* dim.j = */ dim_j,
/* dim.k = */ dim_k,
/* read.box = */ box,
/* read.ptr = */ NULL,
/* read.i = */ block_i,
/* read.j = */ block_j,
/* read.k = */ block_k,
/* read.jStride = */ level->my_boxes[box].jStride,
/* read.kStride = */ level->my_boxes[box].kStride,
/* read.scale = */ 1,
/* write.box = */ box,
/* write.ptr = */ NULL,
/* write.i = */ block_i,
/* write.j = */ block_j,
/* write.k = */ block_k,
/* write.jStride = */ level->my_boxes[box].jStride,
/* write.kStride = */ level->my_boxes[box].kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ blockcopy_i,
/* blockcopy_j = */ blockcopy_j,
/* blockcopy_k = */ blockcopy_k,
/* subtype = */ normal
);
}}}}}
#ifdef BLOCK_SPATIAL_SORT
// sort all the resultant blocks by box,k,j,i (good locality)
qsort(level->boundary_condition.blocks[shape],level->boundary_condition.num_blocks[shape],sizeof(blockCopy_type),qsortBlock);
#endif
}
//----------------------------------------------------------------------------------------------------------------------------------------------------
// create a mini program that packs data into MPI send buffers, exchanges local data, and unpacks the MPI recv buffers
// broadly speaking...
// 1. traverse my list of Boxes and create a list of ghosts that must be sent
// 2. create a list of neighbors to send to
// 3. allocate and populate the pack list and allocate the send buffers
// 4. allocate and populate the local exchange list
// 5. traverse my list of Boxes and create a list of ghosts that must be received
// 6. create a list of neighbors to receive from
// 7. allocate and populate the unpack list and allocate the recv buffers
//
// thus a ghost zone exchange is
// 1. prepost an Irecv for each MPI recv buffer (1 per neighbor)
// 2. traverse the pack list
// 3. post the Isends for each MPI send buffer (1 per neighbor)
// 4. traverse the local copy list
// 5. waitall
// 6. traverse the unpack list
//
// / 24 25 26 /
// / 21 22 23 / (k+1)
// / 18 19 20 /
//
// / 15 16 17 /
// / 12 13 14 / (k)
// / 9 10 11 /
//
// / 6 7 8 /
// / 3 4 5 / (k-1)
// / 0 1 2 /
//
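// direction encoding (illustrative): dir = 13 + di + 3*dj + 9*dk with di,dj,dk in
// {-1,0,+1}; decode with di=(dir%3)-1, dj=((dir%9)/3)-1, dk=(dir/9)-1.
// e.g. dir=22 -> (di,dj,dk)=(0,0,+1), the center of the (k+1) plane in the diagram
// above, while dir=0 -> (-1,-1,-1), a corner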
void build_exchange_ghosts(level_type *level, int shape){
int faces[27] = {0,0,0,0,1,0,0,0,0, 0,1,0,1,0,1,0,1,0, 0,0,0,0,1,0,0,0,0};
int edges[27] = {0,1,0,1,0,1,0,1,0, 1,0,1,0,0,0,1,0,1, 0,1,0,1,0,1,0,1,0};
int corners[27] = {1,0,1,0,0,0,1,0,1, 0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,1,0,1};
// initialize to defaults...
level->exchange_ghosts[shape].num_recvs = 0;
level->exchange_ghosts[shape].num_sends = 0;
level->exchange_ghosts[shape].recv_ranks = NULL;
level->exchange_ghosts[shape].send_ranks = NULL;
level->exchange_ghosts[shape].recv_sizes = NULL;
level->exchange_ghosts[shape].send_sizes = NULL;
level->exchange_ghosts[shape].recv_buffers = NULL;
level->exchange_ghosts[shape].send_buffers = NULL;
level->exchange_ghosts[shape].blocks[0] = NULL;
level->exchange_ghosts[shape].blocks[1] = NULL;
level->exchange_ghosts[shape].blocks[2] = NULL;
level->exchange_ghosts[shape].num_blocks[0] = 0;
level->exchange_ghosts[shape].num_blocks[1] = 0;
level->exchange_ghosts[shape].num_blocks[2] = 0;
level->exchange_ghosts[shape].allocated_blocks[0] = 0;
level->exchange_ghosts[shape].allocated_blocks[1] = 0;
level->exchange_ghosts[shape].allocated_blocks[2] = 0;
#ifdef USE_MPI
level->exchange_ghosts[shape].requests = NULL;
level->exchange_ghosts[shape].status = NULL;
#endif
int n,CommunicateThisDir[27];for(n=0;n<27;n++)CommunicateThisDir[n] = faces[n] + edges[n] + corners[n];// to be safe, communicate everything
switch(shape){
case STENCIL_SHAPE_BOX: for(n=0;n<27;n++)CommunicateThisDir[n] = faces[n] + edges[n] + corners[n];break;
case STENCIL_SHAPE_STAR: for(n=0;n<27;n++)CommunicateThisDir[n] = faces[n] ;break;
case STENCIL_SHAPE_NO_CORNERS:for(n=0;n<27;n++)CommunicateThisDir[n] = faces[n] + edges[n] ;break;
}
int sendBox,recvBox;
int stage;
int _rank;
int ghost,numGhosts,numGhostsRemote;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// traverse my list of boxes and create lists of neighboring boxes and neighboring ranks
GZ_type *ghostsToSend = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box.
int *sendRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box.
if(level->num_my_boxes>0){
if(ghostsToSend == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/ghostsToSend\n");exit(0);}
if(sendRanks == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/sendRanks \n");exit(0);}
}
numGhosts = 0;
numGhostsRemote = 0;
for(sendBox=0;sendBox<level->num_my_boxes;sendBox++){
int di,dj,dk;
for(dk=-1;dk<=1;dk++){
for(dj=-1;dj<=1;dj++){
for(di=-1;di<=1;di++){
int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){
int myBoxID = level->my_boxes[sendBox].global_box_id;
int myBox_i = level->my_boxes[sendBox].low.i / level->box_dim;
int myBox_j = level->my_boxes[sendBox].low.j / level->box_dim;
int myBox_k = level->my_boxes[sendBox].low.k / level->box_dim;
int neighborBoxID = -1;
if(level->boundary_condition.type == BC_PERIODIC){
int neighborBox_i = ( myBox_i + di + level->boxes_in.i) % level->boxes_in.i;
int neighborBox_j = ( myBox_j + dj + level->boxes_in.j) % level->boxes_in.j;
int neighborBox_k = ( myBox_k + dk + level->boxes_in.k) % level->boxes_in.k;
neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
}else{
int neighborBox_i = ( myBox_i + di );
int neighborBox_j = ( myBox_j + dj );
int neighborBox_k = ( myBox_k + dk );
if( (neighborBox_i>=0) && (neighborBox_i<level->boxes_in.i) &&
(neighborBox_j>=0) && (neighborBox_j<level->boxes_in.j) &&
(neighborBox_k>=0) && (neighborBox_k<level->boxes_in.k) ){ // i.e. the neighbor is a valid box
neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
}
}
if(neighborBoxID>=0){
if( level->rank_of_box[neighborBoxID] != -1 ){
ghostsToSend[numGhosts].sendRank = level->my_rank;
ghostsToSend[numGhosts].sendBoxID = myBoxID;
ghostsToSend[numGhosts].sendBox = sendBox;
ghostsToSend[numGhosts].sendDir = dir;
ghostsToSend[numGhosts].recvRank = level->rank_of_box[neighborBoxID];
ghostsToSend[numGhosts].recvBoxID = neighborBoxID;
ghostsToSend[numGhosts].recvBox = -1;
if( level->rank_of_box[neighborBoxID] != level->my_rank ){
sendRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID];
}else{
int recvBox=0;while(level->my_boxes[recvBox].global_box_id!=neighborBoxID)recvBox++; // search my list of boxes for the appropriate recvBox index
ghostsToSend[numGhosts].recvBox = recvBox;
}
numGhosts++;
}}
}}}}
}
// sort the ghosts by sendRank(==my rank) then by sendBoxID... ensures the send buffers are always packed in sendBoxID order (the same order in which the receivers will unpack)
qsort(ghostsToSend,numGhosts ,sizeof(GZ_type),qsortGZ );
// sort the lists of neighboring ranks and remove duplicates...
qsort(sendRanks ,numGhostsRemote,sizeof( int),qsortInt);
int numSendRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(sendRanks[ghost] != _rank){_rank=sendRanks[ghost];sendRanks[numSendRanks++]=sendRanks[ghost];}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// in a two-stage process, traverse the list of ghosts and allocate the pack/local lists as well as the MPI buffers, and then populate the pack/local lists
level->exchange_ghosts[shape].num_sends = numSendRanks;
level->exchange_ghosts[shape].send_ranks = (int*)malloc(numSendRanks*sizeof(int));
level->exchange_ghosts[shape].send_sizes = (int*)malloc(numSendRanks*sizeof(int));
level->exchange_ghosts[shape].send_buffers = (double**)malloc(numSendRanks*sizeof(double*));
if(numSendRanks>0){
if(level->exchange_ghosts[shape].send_ranks ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_ranks\n",shape);exit(0);}
if(level->exchange_ghosts[shape].send_sizes ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_sizes\n",shape);exit(0);}
if(level->exchange_ghosts[shape].send_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_buffers\n",shape);exit(0);}
}
level->exchange_ghosts[shape].blocks[0] = NULL;
level->exchange_ghosts[shape].blocks[1] = NULL;
level->exchange_ghosts[shape].num_blocks[0] = 0;
level->exchange_ghosts[shape].num_blocks[1] = 0;
level->exchange_ghosts[shape].allocated_blocks[0] = 0;
level->exchange_ghosts[shape].allocated_blocks[1] = 0;
for(stage=0;stage<=1;stage++){
// stage=0... traverse the list and calculate the buffer sizes
// stage=1... allocate MPI send buffers, traverse the list, and populate the pack/local lists...
int neighbor;
for(neighbor=0;neighbor<numSendRanks;neighbor++){
if(stage==1){
level->exchange_ghosts[shape].send_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[shape].send_sizes[neighbor]*sizeof(double));
if(level->exchange_ghosts[shape].send_sizes[neighbor]>0)
if(level->exchange_ghosts[shape].send_buffers[neighbor]==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_buffers[neighbor]\n",shape);exit(0);}
memset(level->exchange_ghosts[shape].send_buffers[neighbor], 0,level->exchange_ghosts[shape].send_sizes[neighbor]*sizeof(double));
}
level->exchange_ghosts[shape].send_ranks[neighbor]=sendRanks[neighbor];
level->exchange_ghosts[shape].send_sizes[neighbor]=0;
}
for(ghost=0;ghost<numGhosts;ghost++){
int dim_i=-1, dim_j=-1, dim_k=-1;
int send_i=-1,send_j=-1,send_k=-1;
int recv_i=-1,recv_j=-1,recv_k=-1;
// decode ghostsToSend[ghost].sendDir (direction sent) into di/dj/dk
int di = ((ghostsToSend[ghost].sendDir % 3) )-1;
int dj = ((ghostsToSend[ghost].sendDir % 9)/3)-1;
int dk = ((ghostsToSend[ghost].sendDir / 9) )-1;
switch(di){ // direction relative to sender
case -1:send_i=0; dim_i=level->box_ghosts;recv_i= level->box_dim; break;
case 0:send_i=0; dim_i=level->box_dim; recv_i=0; break;
case 1:send_i=level->box_dim-level->box_ghosts;dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break;
}
switch(dj){ // direction relative to sender
case -1:send_j=0; dim_j=level->box_ghosts;recv_j= level->box_dim; break;
case 0:send_j=0; dim_j=level->box_dim; recv_j=0; break;
case 1:send_j=level->box_dim-level->box_ghosts;dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break;
}
switch(dk){ // direction relative to sender
case -1:send_k=0; dim_k=level->box_ghosts;recv_k= level->box_dim; break;
case 0:send_k=0; dim_k=level->box_dim; recv_k=0; break;
case 1:send_k=level->box_dim-level->box_ghosts;dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break;
}
// determine if this ghost requires a pack or local exchange
int LocalExchange; // 0 = pack list, 1 = local exchange list
if(ghostsToSend[ghost].recvRank != level->my_rank){
LocalExchange=0; // pack
neighbor=0;while(level->exchange_ghosts[shape].send_ranks[neighbor] != ghostsToSend[ghost].recvRank)neighbor++;
}else{
LocalExchange=1; // local
neighbor=-1;
}
if(stage==1){
if(LocalExchange) // append to the local exchange list...
append_block_to_list(&(level->exchange_ghosts[shape].blocks[1]),&(level->exchange_ghosts[shape].allocated_blocks[1]),&(level->exchange_ghosts[shape].num_blocks[1]),
/* dim.i = */ dim_i,
/* dim.j = */ dim_j,
/* dim.k = */ dim_k,
/* read.box = */ ghostsToSend[ghost].sendBox,
/* read.ptr = */ NULL,
/* read.i = */ send_i,
/* read.j = */ send_j,
/* read.k = */ send_k,
/* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].jStride,
/* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].kStride,
/* read.scale = */ 1,
/* write.box = */ ghostsToSend[ghost].recvBox,
/* write.ptr = */ NULL,
/* write.i = */ recv_i,
/* write.j = */ recv_j,
/* write.k = */ recv_k,
/* write.jStride = */ level->my_boxes[ghostsToSend[ghost].recvBox].jStride,
/* write.kStride = */ level->my_boxes[ghostsToSend[ghost].recvBox].kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
);
else // append to the MPI pack list...
append_block_to_list(&(level->exchange_ghosts[shape].blocks[0]),&(level->exchange_ghosts[shape].allocated_blocks[0]),&(level->exchange_ghosts[shape].num_blocks[0]),
/* dim.i = */ dim_i,
/* dim.j = */ dim_j,
/* dim.k = */ dim_k,
/* read.box = */ ghostsToSend[ghost].sendBox,
/* read.ptr = */ NULL,
/* read.i = */ send_i,
/* read.j = */ send_j,
/* read.k = */ send_k,
/* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].jStride,
/* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].kStride,
/* read.scale = */ 1,
/* write.box = */ -1,
/* write.ptr = */ level->exchange_ghosts[shape].send_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks
/* write.i = */ level->exchange_ghosts[shape].send_sizes[neighbor], // current offset in the MPI send buffer
/* write.j = */ 0,
/* write.k = */ 0,
/* write.jStride = */ dim_i, // contiguous block
/* write.kStride = */ dim_i*dim_j, // contiguous block
/* write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
);}
if(neighbor>=0)level->exchange_ghosts[shape].send_sizes[neighbor]+=dim_i*dim_j*dim_k;
} // ghost for-loop
} // stage for-loop
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// free temporary storage...
free(ghostsToSend);
free(sendRanks);
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// traverse my list of boxes and create lists of neighboring boxes and neighboring ranks
GZ_type *ghostsToRecv = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box.
int *recvRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box.
if(level->num_my_boxes>0){
if(ghostsToRecv == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/ghostsToRecv\n");exit(0);}
if(recvRanks == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/recvRanks \n");exit(0);}
}
numGhosts = 0;
numGhostsRemote = 0;
for(recvBox=0;recvBox<level->num_my_boxes;recvBox++){
int di,dj,dk;
for(dk=-1;dk<=1;dk++){
for(dj=-1;dj<=1;dj++){
for(di=-1;di<=1;di++){
int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){
int myBoxID = level->my_boxes[recvBox].global_box_id;
int myBox_i = level->my_boxes[recvBox].low.i / level->box_dim;
int myBox_j = level->my_boxes[recvBox].low.j / level->box_dim;
int myBox_k = level->my_boxes[recvBox].low.k / level->box_dim;
int neighborBoxID = -1;
if(level->boundary_condition.type == BC_PERIODIC){
int neighborBox_i = ( myBox_i + di + level->boxes_in.i) % level->boxes_in.i;
int neighborBox_j = ( myBox_j + dj + level->boxes_in.j) % level->boxes_in.j;
int neighborBox_k = ( myBox_k + dk + level->boxes_in.k) % level->boxes_in.k;
neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
}else{
int neighborBox_i = ( myBox_i + di );
int neighborBox_j = ( myBox_j + dj );
int neighborBox_k = ( myBox_k + dk );
if( (neighborBox_i>=0) && (neighborBox_i<level->boxes_in.i) &&
(neighborBox_j>=0) && (neighborBox_j<level->boxes_in.j) &&
(neighborBox_k>=0) && (neighborBox_k<level->boxes_in.k) ){ // i.e. the neighbor is a valid box
neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
}
}
if(neighborBoxID>=0){
if( (level->rank_of_box[neighborBoxID] != -1) && (level->rank_of_box[neighborBoxID] != level->my_rank) ){
ghostsToRecv[numGhosts].sendRank = level->rank_of_box[neighborBoxID];
ghostsToRecv[numGhosts].sendBoxID = neighborBoxID;
ghostsToRecv[numGhosts].sendBox = -1;
ghostsToRecv[numGhosts].sendDir = 26-dir;
ghostsToRecv[numGhosts].recvRank = level->my_rank;
ghostsToRecv[numGhosts].recvBoxID = myBoxID;
ghostsToRecv[numGhosts].recvBox = recvBox;
numGhosts++;
recvRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID];
}}
}}}}
}
// sort the ghosts by sendRank then by sendBoxID... ensures the recv buffers are always unpacked in sendBoxID order (matching the order in which the senders packed)
qsort(ghostsToRecv,numGhosts ,sizeof(GZ_type),qsortGZ );
// sort the lists of neighboring ranks and remove duplicates...
qsort(recvRanks ,numGhostsRemote,sizeof( int),qsortInt);
int numRecvRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(recvRanks[ghost] != _rank){_rank=recvRanks[ghost];recvRanks[numRecvRanks++]=recvRanks[ghost];}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// in a two-stage process, traverse the list of ghosts and allocate the unpack lists as well as the MPI buffers, and then populate the unpack list
level->exchange_ghosts[shape].num_recvs = numRecvRanks;
level->exchange_ghosts[shape].recv_ranks = (int*)malloc(numRecvRanks*sizeof(int));
level->exchange_ghosts[shape].recv_sizes = (int*)malloc(numRecvRanks*sizeof(int));
level->exchange_ghosts[shape].recv_buffers = (double**)malloc(numRecvRanks*sizeof(double*));
if(numRecvRanks>0){
if(level->exchange_ghosts[shape].recv_ranks ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_ranks\n",shape);exit(0);}
if(level->exchange_ghosts[shape].recv_sizes ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_sizes\n",shape);exit(0);}
if(level->exchange_ghosts[shape].recv_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_buffers\n",shape);exit(0);}
}
level->exchange_ghosts[shape].blocks[2] = NULL;
level->exchange_ghosts[shape].num_blocks[2] = 0;
level->exchange_ghosts[shape].allocated_blocks[2] = 0;
for(stage=0;stage<=1;stage++){
// stage=0... traverse the list and calculate the buffer sizes
// stage=1... allocate MPI recv buffers, traverse the list, and populate the unpack list...
int neighbor;
for(neighbor=0;neighbor<numRecvRanks;neighbor++){
if(stage==1){
level->exchange_ghosts[shape].recv_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[shape].recv_sizes[neighbor]*sizeof(double));
if(level->exchange_ghosts[shape].recv_sizes[neighbor]>0)
if(level->exchange_ghosts[shape].recv_buffers[neighbor]==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_buffers[neighbor]\n",shape);exit(0);}
memset(level->exchange_ghosts[shape].recv_buffers[neighbor], 0,level->exchange_ghosts[shape].recv_sizes[neighbor]*sizeof(double));
}
level->exchange_ghosts[shape].recv_ranks[neighbor]=recvRanks[neighbor];
level->exchange_ghosts[shape].recv_sizes[neighbor]=0;
}
for(ghost=0;ghost<numGhosts;ghost++){
int dim_i=-1, dim_j=-1, dim_k=-1;
//int send_i=-1,send_j=-1,send_k=-1;
int recv_i=-1,recv_j=-1,recv_k=-1;
// decode ghostsToRecv[ghost].sendDir (direction sent) into di/dj/dk
int di = ((ghostsToRecv[ghost].sendDir % 3) )-1;
int dj = ((ghostsToRecv[ghost].sendDir % 9)/3)-1;
int dk = ((ghostsToRecv[ghost].sendDir / 9) )-1;
switch(di){ // direction relative to sender
case -1:dim_i=level->box_ghosts;recv_i= level->box_dim; break;
case 0:dim_i=level->box_dim; recv_i=0; break;
case 1:dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break;
}
switch(dj){ // direction relative to sender
case -1:dim_j=level->box_ghosts;recv_j= level->box_dim; break;
case 0:dim_j=level->box_dim; recv_j=0; break;
case 1:dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break;
}
switch(dk){ // direction relative to sender
case -1:dim_k=level->box_ghosts;recv_k= level->box_dim; break;
case 0:dim_k=level->box_dim; recv_k=0; break;
case 1:dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break;
}
// find the neighbor (MPI rank) this ghost will arrive from
neighbor=0;while(level->exchange_ghosts[shape].recv_ranks[neighbor] != ghostsToRecv[ghost].sendRank)neighbor++;
if(stage==1)append_block_to_list(&(level->exchange_ghosts[shape].blocks[2]),&(level->exchange_ghosts[shape].allocated_blocks[2]),&(level->exchange_ghosts[shape].num_blocks[2]),
/*dim.i = */ dim_i,
/*dim.j = */ dim_j,
/*dim.k = */ dim_k,
/*read.box = */ -1,
/*read.ptr = */ level->exchange_ghosts[shape].recv_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks
/*read.i = */ level->exchange_ghosts[shape].recv_sizes[neighbor], // current offset in the MPI recv buffer
/*read.j = */ 0,
/*read.k = */ 0,
/*read.jStride = */ dim_i, // contiguous block
/*read.kStride = */ dim_i*dim_j, // contiguous block
/*read.scale = */ 1,
/*write.box = */ ghostsToRecv[ghost].recvBox,
/*write.ptr = */ NULL,
/*write.i = */ recv_i,
/*write.j = */ recv_j,
/*write.k = */ recv_k,
/*write.jStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].jStride,
/*write.kStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].kStride,
/*write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
);
if(neighbor>=0)level->exchange_ghosts[shape].recv_sizes[neighbor]+=dim_i*dim_j*dim_k;
} // ghost for-loop
} // stage for-loop
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// free temporary storage...
free(ghostsToRecv);
free(recvRanks);
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// malloc MPI requests/status arrays
#ifdef USE_MPI
level->exchange_ghosts[shape].requests = (MPI_Request*)malloc((level->exchange_ghosts[shape].num_sends+level->exchange_ghosts[shape].num_recvs)*sizeof(MPI_Request));
level->exchange_ghosts[shape].status = (MPI_Status *)malloc((level->exchange_ghosts[shape].num_sends+level->exchange_ghosts[shape].num_recvs)*sizeof(MPI_Status ));
if((level->exchange_ghosts[shape].num_sends+level->exchange_ghosts[shape].num_recvs)>0){
if(level->exchange_ghosts[shape].requests==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].requests\n",shape);exit(0);}
if(level->exchange_ghosts[shape].status ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].status\n",shape);exit(0);}
}
#endif
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#ifdef BLOCK_SPATIAL_SORT
// sort all the resultant blocks by box,k,j,i (good locality)
qsort(level->exchange_ghosts[shape].blocks[0],level->exchange_ghosts[shape].num_blocks[0],sizeof(blockCopy_type),qsortBlock);
qsort(level->exchange_ghosts[shape].blocks[1],level->exchange_ghosts[shape].num_blocks[1],sizeof(blockCopy_type),qsortBlock);
qsort(level->exchange_ghosts[shape].blocks[2],level->exchange_ghosts[shape].num_blocks[2],sizeof(blockCopy_type),qsortBlock);
#endif
}
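// a minimal exchange-time sketch (illustrative only, NOT the actual solver code)
// showing how the three block lists built above are consumed; CopyBlock() is a
// hypothetical helper that performs one blockCopy_type transfer, and the MPI tag
// and communicator are placeholders
#if 0
void exchange_sketch(level_type *level, int shape){
  int n,nMessages=0;
  #ifdef USE_MPI
  // 1. prepost an Irecv per neighbor...
  for(n=0;n<level->exchange_ghosts[shape].num_recvs;n++)
    MPI_Irecv(level->exchange_ghosts[shape].recv_buffers[n],level->exchange_ghosts[shape].recv_sizes[n],MPI_DOUBLE,level->exchange_ghosts[shape].recv_ranks[n],0,MPI_COMM_WORLD,&level->exchange_ghosts[shape].requests[nMessages++]);
  #endif
  // 2. pack surface data into the MPI send buffers (blocks[0])...
  for(n=0;n<level->exchange_ghosts[shape].num_blocks[0];n++)CopyBlock(level,&level->exchange_ghosts[shape].blocks[0][n]);
  #ifdef USE_MPI
  // 3. post an Isend per neighbor...
  for(n=0;n<level->exchange_ghosts[shape].num_sends;n++)
    MPI_Isend(level->exchange_ghosts[shape].send_buffers[n],level->exchange_ghosts[shape].send_sizes[n],MPI_DOUBLE,level->exchange_ghosts[shape].send_ranks[n],0,MPI_COMM_WORLD,&level->exchange_ghosts[shape].requests[nMessages++]);
  #endif
  // 4. overlap communication with box-to-box copies local to this rank (blocks[1])...
  for(n=0;n<level->exchange_ghosts[shape].num_blocks[1];n++)CopyBlock(level,&level->exchange_ghosts[shape].blocks[1][n]);
  #ifdef USE_MPI
  // 5. wait for all sends and recvs to complete...
  if(nMessages)MPI_Waitall(nMessages,level->exchange_ghosts[shape].requests,level->exchange_ghosts[shape].status);
  #endif
  // 6. unpack the MPI recv buffers into ghost zones (blocks[2])...
  for(n=0;n<level->exchange_ghosts[shape].num_blocks[2];n++)CopyBlock(level,&level->exchange_ghosts[shape].blocks[2][n]);
}
#endif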
//---------------------------------------------------------------------------------------------------------------------------------------------------
// create the pointers in level_type to the contiguous vector FP data (useful for bulk copies to/from accelerators)
// create the pointers in each box to their respective segment of the level's vector FP data (useful for box-relative operators)
// if( (level->numVectors > 0) && (numVectors > level->numVectors) ) then allocate additional space for (numVectors-level->numVectors) and copy the old level->numVectors vectors of data
void create_vectors(level_type *level, int numVectors){
if(numVectors <= level->numVectors)return; // already have enough space
double * old_vectors_base = level->vectors_base; // save a pointer to the originally allocated data for subsequent free()
double * old_vector0 = NULL;
if(level->numVectors>0)old_vector0 = level->vectors[0]; // save a pointer to old FP data to copy
// calculate the size of each box...
level->box_jStride = (level->box_dim+2*level->box_ghosts);while(level->box_jStride % BOX_ALIGN_JSTRIDE)level->box_jStride++; // pencil
level->box_kStride = level->box_jStride*(level->box_dim+2*level->box_ghosts);while(level->box_kStride % BOX_ALIGN_KSTRIDE)level->box_kStride++; // plane
level->box_volume = level->box_kStride*(level->box_dim+2*level->box_ghosts);while(level->box_volume % BOX_ALIGN_VOLUME )level->box_volume++; // volume
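// worked example (illustrative, assuming box_dim=32, box_ghosts=1, and alignment
// constants BOX_ALIGN_JSTRIDE=8, BOX_ALIGN_KSTRIDE=8, BOX_ALIGN_VOLUME=8):
//   a pencil is 34 doubles, padded up to jStride=40;
//   a plane  is 40*34=1360 doubles (already a multiple of 8);
//   the padded volume is 1360*34=46240 doubles per box per vector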
#define VECTOR_MALLOC_BULK
#ifdef VECTOR_MALLOC_BULK
// allocate one aligned, double-precision array and divide it among vectors...
uint64_t malloc_size = (uint64_t)numVectors*level->num_my_boxes*level->box_volume*sizeof(double) + 4096;
level->vectors_base = (double*)malloc(malloc_size);
if((numVectors>0)&&(level->vectors_base==NULL)){fprintf(stderr,"malloc failed - level->vectors_base\n");exit(0);}
double * tmpbuf = level->vectors_base;
while( (uint64_t)(tmpbuf+level->box_ghosts*(1+level->box_jStride+level->box_kStride)) & 0xff ){tmpbuf++;} // align first *non-ghost* zone element of first component to a 256-Byte boundary
uint64_t ofs;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(ofs=0;ofs<(uint64_t)numVectors*level->num_my_boxes*level->box_volume;ofs++){tmpbuf[ofs]=0.0;} // Faster in MPI+OpenMP environments, but not NUMA-aware
// if there is existing FP data... copy it, then free old data and pointer array
if(level->numVectors>0){
memcpy(tmpbuf,old_vector0,(uint64_t)level->numVectors*level->num_my_boxes*level->box_volume*sizeof(double)); // FIX... omp thread ???
if(old_vectors_base)free(old_vectors_base); // free old data...
}
// allocate an array of pointers which point to the union of boxes for each vector
// NOTE, this requires just one copyin per vector to an accelerator rather than requiring one copyin per box per vector
if(level->numVectors>0)free(level->vectors); // free any previously allocated vector array
level->vectors = (double **)malloc(numVectors*sizeof(double*));
if((numVectors>0)&&(level->vectors==NULL)){fprintf(stderr,"malloc failed - level->vectors\n");exit(0);}
uint64_t c;for(c=0;c<numVectors;c++){level->vectors[c] = tmpbuf + (uint64_t)c*level->num_my_boxes*level->box_volume;}
#else
// allocate vectors individually (simple, but may cause conflict misses)
double ** old_vectors = level->vectors;
level->vectors = (double **)malloc(numVectors*sizeof(double*));
uint64_t c;
for(c= 0;c<level->numVectors;c++){level->vectors[c] = old_vectors[c];}
for(c=level->numVectors;c< numVectors;c++){
level->vectors[c] = (double*)malloc((uint64_t)level->num_my_boxes*level->box_volume*sizeof(double));
uint64_t ofs;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(ofs=0;ofs<(uint64_t)level->num_my_boxes*level->box_volume;ofs++){level->vectors[c][ofs]=0.0;} // Faster in MPI+OpenMP environments, but not NUMA-aware
}
free(old_vectors);
#endif
// build the list of boxes...
int box=0;
int i,j,k;
for(k=0;k<level->boxes_in.k;k++){
for(j=0;j<level->boxes_in.j;j++){
for(i=0;i<level->boxes_in.i;i++){
int jStride = level->boxes_in.i;
int kStride = level->boxes_in.i*level->boxes_in.j;
int b=i + j*jStride + k*kStride;
if(level->rank_of_box[b]==level->my_rank){
if(level->numVectors>0)free(level->my_boxes[box].vectors); // free previously allocated vector array
level->my_boxes[box].vectors = (double **)malloc(numVectors*sizeof(double*));
if((numVectors>0)&&(level->my_boxes[box].vectors==NULL)){fprintf(stderr,"malloc failed - level->my_boxes[box].vectors\n");exit(0);}
uint64_t c;for(c=0;c<numVectors;c++){level->my_boxes[box].vectors[c] = level->vectors[c] + (uint64_t)box*level->box_volume;}
level->my_boxes[box].numVectors = numVectors;
level->my_boxes[box].dim = level->box_dim;
level->my_boxes[box].ghosts = level->box_ghosts;
level->my_boxes[box].jStride = level->box_jStride;
level->my_boxes[box].kStride = level->box_kStride;
level->my_boxes[box].volume = level->box_volume;
level->my_boxes[box].low.i = i*level->box_dim;
level->my_boxes[box].low.j = j*level->box_dim;
level->my_boxes[box].low.k = k*level->box_dim;
level->my_boxes[box].global_box_id = b;
box++;
}}}}
// level now has created/initialized vector FP data
level->numVectors = numVectors;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// create a level by populating the basic data structure, distribute boxes within the level among processes, allocate memory, and create any auxiliaries
// box_ghosts must be >= stencil_get_radius()
// numVectors represents an estimate of the number of vectors needed in this level. Additional vectors can be added via subsequent calls to create_vectors()
void create_level(level_type *level, int boxes_in_i, int box_dim, int box_ghosts, int numVectors, int domain_boundary_condition, int my_rank, int num_ranks){
int box;
int TotalBoxes = boxes_in_i*boxes_in_i*boxes_in_i;
if(my_rank==0){
//if(domain_boundary_condition==BC_DIRICHLET)fprintf(stdout,"\nattempting to create a %d^3 level (with Dirichlet BC) using a %d^3 grid of %d^3 boxes and %d tasks...\n",box_dim*boxes_in_i,boxes_in_i,box_dim,num_ranks);
//if(domain_boundary_condition==BC_PERIODIC )fprintf(stdout,"\nattempting to create a %d^3 level (with Periodic BC) using a %d^3 grid of %d^3 boxes and %d tasks...\n", box_dim*boxes_in_i,boxes_in_i,box_dim,num_ranks);
fprintf(stdout,"\nattempting to create a %d^3 level from %d x %d^3 boxes distributed among %d tasks...\n", box_dim*boxes_in_i,TotalBoxes,box_dim,num_ranks);
if(domain_boundary_condition==BC_DIRICHLET)fprintf(stdout," boundary condition = BC_DIRICHLET\n");
if(domain_boundary_condition==BC_PERIODIC )fprintf(stdout," boundary condition = BC_PERIODIC\n");
}
int omp_threads = 1;
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
omp_threads = omp_get_num_threads();
}
}
#endif
if(box_ghosts < stencil_get_radius() ){
if(my_rank==0)fprintf(stderr,"ghosts(%d) must be >= stencil_get_radius(%d)\n",box_ghosts,stencil_get_radius());
exit(0);
}
level->box_dim = box_dim;
level->box_ghosts = box_ghosts;
level->numVectors = 0; // no vectors have been allocated yet
level->vectors_base = NULL; // pointer returned by bulk malloc
level->vectors = NULL; // pointers to individual vectors
level->boxes_in.i = boxes_in_i;
level->boxes_in.j = boxes_in_i;
level->boxes_in.k = boxes_in_i;
level->dim.i = box_dim*level->boxes_in.i;
level->dim.j = box_dim*level->boxes_in.j;
level->dim.k = box_dim*level->boxes_in.k;
level->active = 1;
level->my_rank = my_rank;
level->num_ranks = num_ranks;
level->boundary_condition.type = domain_boundary_condition;
level->must_subtract_mean = -1;
level->num_threads = omp_threads;
level->my_blocks = NULL;
level->num_my_blocks = 0;
level->allocated_blocks = 0;
level->tag = log2(level->dim.i);
level->fluxes = NULL;
// allocate 3D array of integers to hold the MPI rank of the corresponding box and initialize to -1 (unassigned)
level->rank_of_box = (int*)malloc(level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int));
if(level->rank_of_box==NULL){fprintf(stderr,"malloc of level->rank_of_box failed\n");exit(0);}
for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){level->rank_of_box[box]=-1;} // -1 denotes that there is no actual box assigned to this region
// parallelize the level (i.e. assign a process rank to each box)...
#ifdef DECOMPOSE_LEX
// lexicographical ordering... good load balance, potentially high bisection bandwidth requirements, bad surface:volume ratio when #boxes/proc is large
if(my_rank==0){fprintf(stdout," Decomposing level via lexicographical ordering... ");fflush(stdout);}
decompose_level_lex(level->rank_of_box,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,num_ranks);
#elif DECOMPOSE_BISECTION_SPECIAL
// recursive partitioning by primes
if(my_rank==0){fprintf(stdout," Decomposing level via partitioning by primes... ");fflush(stdout);}
decompose_level_bisection_special(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,0,num_ranks);
#elif DECOMPOSE_BISECTION
// recursive bisection
if(my_rank==0){fprintf(stdout," Decomposing level via recursive bisection... ");fflush(stdout);}
decompose_level_bisection(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,num_ranks,0,level->boxes_in.i*level->boxes_in.j*level->boxes_in.k);
#else//#elif DECOMPOSE_ZMORT
if(my_rank==0){fprintf(stdout," Decomposing level via Z-mort ordering... ");fflush(stdout);}
#if 0 // Z-Mort over a power of two bounding box skipping boxes outside the domain
int idim_padded=1;while(idim_padded<level->boxes_in.i)idim_padded*=2;
int jdim_padded=1;while(jdim_padded<level->boxes_in.j)jdim_padded*=2;
int kdim_padded=1;while(kdim_padded<level->boxes_in.k)kdim_padded*=2;
#else // Z-Mort over the valid domain with odd-sized base cases (i.e. zmort on 3x3)
int idim_padded=level->boxes_in.i;
int jdim_padded=level->boxes_in.j;
int kdim_padded=level->boxes_in.k;
#endif
decompose_level_zmort(level->rank_of_box,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,0,0,0,idim_padded,jdim_padded,kdim_padded,num_ranks,0,level->boxes_in.i*level->boxes_in.j*level->boxes_in.k);
#endif
if(my_rank==0){fprintf(stdout,"done\n");fflush(stdout);}
//print_decomposition(level);// for debug purposes only
// calculate how many boxes I own...
level->num_my_boxes=0;
for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){if(level->rank_of_box[box]==level->my_rank)level->num_my_boxes++;}
level->my_boxes = (box_type*)malloc(level->num_my_boxes*sizeof(box_type));
if((level->num_my_boxes>0)&&(level->my_boxes==NULL)){fprintf(stderr,"malloc failed - create_level/level->my_boxes\n");exit(0);}
// allocate flattened vector FP data and create pointers...
if(my_rank==0){fprintf(stdout," Allocating vectors... ");fflush(stdout);}
create_vectors(level,numVectors);
if(my_rank==0){fprintf(stdout,"done\n");fflush(stdout);}
// Build an auxiliary data structure that flattens boxes into blocks...
for(box=0;box<level->num_my_boxes;box++){
int blockcopy_i = BLOCKCOPY_TILE_I;
int blockcopy_j = BLOCKCOPY_TILE_J;
int blockcopy_k = BLOCKCOPY_TILE_K;
append_block_to_list(&(level->my_blocks),&(level->allocated_blocks),&(level->num_my_blocks),
/* dim.i = */ level->my_boxes[box].dim,
/* dim.j = */ level->my_boxes[box].dim,
/* dim.k = */ level->my_boxes[box].dim,
/* read.box = */ box,
/* read.ptr = */ NULL,
/* read.i = */ 0,
/* read.j = */ 0,
/* read.k = */ 0,
/* read.jStride = */ level->my_boxes[box].jStride,
/* read.kStride = */ level->my_boxes[box].kStride,
/* read.scale = */ 1,
/* write.box = */ box,
/* write.ptr = */ NULL,
/* write.i = */ 0,
/* write.j = */ 0,
/* write.k = */ 0,
/* write.jStride = */ level->my_boxes[box].jStride,
/* write.kStride = */ level->my_boxes[box].kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ blockcopy_i,
/* blockcopy_j = */ blockcopy_j,
/* blockcopy_k = */ blockcopy_k,
/* subtype = */ 0
);
}
// build an assist structure for Gauss Seidel Red Black that would facilitate unrolling and SIMDization...
level->RedBlack_base = NULL;
level->RedBlack_FP = NULL;
if(level->num_my_boxes){
int i,j;
int kStride = level->my_boxes[0].kStride;
int jStride = level->my_boxes[0].jStride;
level->RedBlack_base = (double*)malloc(2*kStride*sizeof(double)+256); // used for free()
level->RedBlack_FP = level->RedBlack_base; // aligned version
// align first *non-ghost* zone element to a 64-Byte boundary...
while( (uint64_t)(level->RedBlack_FP + level->box_ghosts*(1+level->box_jStride)) & 0x3f ){level->RedBlack_FP++;}
// initialize RedBlack array...
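// ((i^j^1)&0x1) is a checkerboard test: it equals 1 exactly when i+j is even.
// Plane 0 (offset ij) therefore holds the 0/1 mask for one color and plane 1
// (offset ij+kStride) the complementary mask, so a red-black smoother can
// multiply by the mask instead of branching on parity in the inner loop.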
for(j=0-level->box_ghosts;j<level->box_dim+level->box_ghosts;j++){
for(i=0-level->box_ghosts;i<level->box_dim+level->box_ghosts;i++){
int ij = (i+level->box_ghosts) + (j+level->box_ghosts)*jStride;
if((i^j^1)&0x1){
level->RedBlack_FP[ij ]=1.0;
level->RedBlack_FP[ij+kStride]=0.0;
}else{
level->RedBlack_FP[ij ]=0.0;
level->RedBlack_FP[ij+kStride]=1.0;
}
// Never update ghost zones
//if( (i<0) || (i>=level->box_dim) || (j<0) || (j>=level->box_dim) ){
// level->RedBlack_FP[ij ]=0.0;
// level->RedBlack_FP[ij+kStride]=0.0;
//}
}}
}
int shape;
// create mini program for each stencil shape to perform a ghost zone exchange...
for(shape=0;shape<STENCIL_MAX_SHAPES;shape++)build_exchange_ghosts( level,shape);
// create mini program for each stencil shape to perform a boundary condition...
for(shape=0;shape<STENCIL_MAX_SHAPES;shape++)build_boundary_conditions(level,shape);
// duplicate MPI_COMM_WORLD to be the communicator for each level
#ifdef USE_MPI
if(my_rank==0){fprintf(stdout," Duplicating MPI_COMM_WORLD... ");fflush(stdout);}
double time_start = MPI_Wtime();
MPI_Comm_dup(MPI_COMM_WORLD,&level->MPI_COMM_ALLREDUCE);
double time_end = MPI_Wtime();
double time_in_comm_dup = 0;
double time_in_comm_dup_send = time_end-time_start;
MPI_Allreduce(&time_in_comm_dup_send,&time_in_comm_dup,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD);
if(my_rank==0){fprintf(stdout,"done (%0.6f seconds)\n",time_in_comm_dup);fflush(stdout);}
#endif
// report on potential load imbalance
int BoxesPerProcess = level->num_my_boxes;
#ifdef USE_MPI
int BoxesPerProcessSend = level->num_my_boxes;
MPI_Allreduce(&BoxesPerProcessSend,&BoxesPerProcess,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
#endif
if(my_rank==0){fprintf(stdout," Calculating boxes per process... target=%0.3f, max=%d\n",(double)TotalBoxes/(double)num_ranks,BoxesPerProcess);}
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// zeros are the timers within this level
// useful if one wishes to separate setup(build) timing from solve timing
void reset_level_timers(level_type *level){
// cycle counters information...
level->timers.smooth = 0;
level->timers.apply_op = 0;
level->timers.residual = 0;
level->timers.blas1 = 0;
level->timers.blas3 = 0;
level->timers.boundary_conditions = 0;
level->timers.restriction_total = 0;
level->timers.restriction_pack = 0;
level->timers.restriction_local = 0;
level->timers.restriction_unpack = 0;
level->timers.restriction_recv = 0;
level->timers.restriction_send = 0;
level->timers.restriction_wait = 0;
level->timers.interpolation_total = 0;
level->timers.interpolation_pack = 0;
level->timers.interpolation_local = 0;
level->timers.interpolation_unpack = 0;
level->timers.interpolation_recv = 0;
level->timers.interpolation_send = 0;
level->timers.interpolation_wait = 0;
level->timers.ghostZone_total = 0;
level->timers.ghostZone_pack = 0;
level->timers.ghostZone_local = 0;
level->timers.ghostZone_unpack = 0;
level->timers.ghostZone_recv = 0;
level->timers.ghostZone_send = 0;
level->timers.ghostZone_wait = 0;
level->timers.collectives = 0;
level->timers.Total = 0;
// solver events information...
level->Krylov_iterations = 0;
level->CAKrylov_formations_of_G = 0;
level->vcycles_from_this_level = 0;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// free all memory allocated by this level
// n.b. in some cases one malloc was used as the basis for an array of pointers; in those cases only the base allocation is freed (e.g. free(x[0]))
void destroy_level(level_type *level){
int i,j;
if(level->my_rank==0){fprintf(stdout,"attempting to free the %5d^3 level... ",level->dim.i);fflush(stdout);}
// box ...
for(i=0;i<level->num_my_boxes;i++)if(level->my_boxes[i].vectors)free(level->my_boxes[i].vectors);
// misc ...
if(level->rank_of_box )free(level->rank_of_box);
if(level->my_boxes )free(level->my_boxes);
if(level->my_blocks )free(level->my_blocks);
if(level->RedBlack_base)free(level->RedBlack_base);
// FP vector data...
#ifdef VECTOR_MALLOC_BULK
if(level->vectors_base)free(level->vectors_base);
if(level->vectors )free(level->vectors);
#else
for(i=0;i<level->numVectors;i++)if(level->vectors[i])free(level->vectors[i]);
if(level->vectors )free(level->vectors);
#endif
// boundary condition mini program...
for(i=0;i<STENCIL_MAX_SHAPES;i++){
if(level->boundary_condition.blocks[i])free(level->boundary_condition.blocks[i]);
}
// ghost zone exchange mini programs...
for(i=0;i<STENCIL_MAX_SHAPES;i++){
if(level->exchange_ghosts[i].num_recvs>0){
for(j=0;j<level->exchange_ghosts[i].num_recvs;j++)if(level->exchange_ghosts[i].recv_buffers[j])free(level->exchange_ghosts[i].recv_buffers[j]);
if(level->exchange_ghosts[i].recv_buffers)free(level->exchange_ghosts[i].recv_buffers);
if(level->exchange_ghosts[i].recv_ranks )free(level->exchange_ghosts[i].recv_ranks );
if(level->exchange_ghosts[i].recv_sizes )free(level->exchange_ghosts[i].recv_sizes );
}
if(level->exchange_ghosts[i].num_sends>0){
for(j=0;j<level->exchange_ghosts[i].num_sends;j++)if(level->exchange_ghosts[i].send_buffers[j])free(level->exchange_ghosts[i].send_buffers[j]);
if(level->exchange_ghosts[i].send_buffers)free(level->exchange_ghosts[i].send_buffers);
if(level->exchange_ghosts[i].send_ranks )free(level->exchange_ghosts[i].send_ranks );
if(level->exchange_ghosts[i].send_sizes )free(level->exchange_ghosts[i].send_sizes );
}
if(level->exchange_ghosts[i].blocks[0] )free(level->exchange_ghosts[i].blocks[0] );
if(level->exchange_ghosts[i].blocks[1] )free(level->exchange_ghosts[i].blocks[1] );
if(level->exchange_ghosts[i].blocks[2] )free(level->exchange_ghosts[i].blocks[2] );
#ifdef USE_MPI
if(level->exchange_ghosts[i].requests )free(level->exchange_ghosts[i].requests );
if(level->exchange_ghosts[i].status )free(level->exchange_ghosts[i].status );
#endif
}
if(level->my_rank==0){fprintf(stdout,"done\n");}
}
|
primitive.c | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include "primitive.h"
#define TINYOBJLOADER_IMPLEMENTATION
#include "tiny_obj_loader.h"
float vec_dot(vec_t A, vec_t B) {
return (A.x * B.x) + (A.y * B.y) + (A.z * B.z);
}
vec_t vec_cross(vec_t A, vec_t B) {
return (vec_t){((A.y * B.z) - (A.z * B.y)),
((A.z * B.x) - (A.x * B.z)),
((A.x * B.y) - (A.y * B.x))};
}
vec_t vec_scale(vec_t A, float s) {
return (vec_t){A.x * s, A.y * s, A.z * s};
}
vec_t vec_add(vec_t A, vec_t B) {
return (vec_t){A.x + B.x, A.y + B.y, A.z + B.z};
}
vec_t vec_sub(vec_t A, vec_t B) {
return (vec_t){A.x - B.x, A.y - B.y, A.z - B.z};
}
vec_t vec_rotate(vec_t A, vec_t B, float t) {
// Ru(t)x = u * DOT(u, x) + cos(t) * CROSS(CROSS(u, x), u) + sin(t) * CROSS(u, x)
vec_t w = vec_cross(B, A);
vec_t v = vec_cross(w, B);
float dAB = ((A.x * B.x) + (A.y * B.y) + (A.z * B.z));
float St = sin(t);
float Ct = cos(t);
return (vec_t){
((B.x * dAB) + (w.x * St) + (v.x * Ct)),
((B.y * dAB) + (w.y * St) + (v.y * Ct)),
((B.z * dAB) + (w.z * St) + (v.z * Ct))
};
}
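/* Usage sketch (hypothetical values; assumes B is a unit-length axis):
 * rotating A=(1,0,0) about B=(0,0,1) by t=pi/2 yields approximately (0,1,0):
 *   vec_t r = vec_rotate((vec_t){1.0f,0.0f,0.0f}, (vec_t){0.0f,0.0f,1.0f}, (float)M_PI/2);
 */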
model_t * load_model(const char * fname) {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string warn, err;
if(!tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, fname, NULL, true)) {
throw std::runtime_error(warn + err);
}
int vec_count = attrib.vertices.size() / 3;
vec_t * vec_list = (vec_t *) malloc(sizeof(vec_t) * vec_count);
memcpy(vec_list, attrib.vertices.data(), sizeof(vec_t) * vec_count);
// for(int i = 0; i < vec_count; i++) {
// vec_list[i] = (vec_t){attrib.vertices[i*3 + 0], attrib.vertices[i*3 + 1], attrib.vertices[i*3 + 2]};
// }
int primitive_count = 0;
for(unsigned long i = 0; i < shapes.size(); i++) {
primitive_count += shapes[i].mesh.indices.size() / 3;
}
primitive_t * primitive_list = (primitive_t *) malloc(sizeof(primitive_t) * primitive_count);
int primitive_index = 0;
for(unsigned long i = 0; i < shapes.size(); i++) {
for(unsigned long j = 0; j < shapes[i].mesh.indices.size(); j+= 3) {
tinyobj::index_t iA = shapes[i].mesh.indices[j];
tinyobj::index_t iB = shapes[i].mesh.indices[j+1];
tinyobj::index_t iC = shapes[i].mesh.indices[j+2];
primitive_list[primitive_index].type = TRIANGLE;
primitive_list[primitive_index].data.triangle = (triangle_t){iA.vertex_index, iB.vertex_index, iC.vertex_index};
primitive_index++;
}
}
model_t * rtn = (model_t *) malloc(sizeof(model_t));
rtn->vec_count = vec_count;
rtn->vec_list = vec_list;
rtn->primitive_count = primitive_count;
rtn->primitive_list = primitive_list;
return rtn;
}
model_t * aggregate_models(model_t ** model_list, int model_count) {
model_t * rtn = (model_t *) malloc(sizeof(model_t));
int vec_total = 0;
int primitive_total = 0;
for(int i = 0; i < model_count; i++) {
vec_total += model_list[i]->vec_count;
primitive_total += model_list[i]->primitive_count;
}
rtn->vec_count = vec_total;
rtn->vec_list = (vec_t *) malloc(sizeof(vec_t) * rtn->vec_count);
rtn->primitive_count = primitive_total;
rtn->primitive_list = (primitive_t *) malloc(sizeof(primitive_t) * primitive_total);
int vec_offset = 0;
int primitive_offset = 0;
for(int i = 0; i < model_count; i++) {
model_t * tmp = copy_model(model_list[i]);
transform_model(tmp);
memcpy(rtn->vec_list + vec_offset, tmp->vec_list, sizeof(vec_t) * tmp->vec_count);
memcpy(rtn->primitive_list + primitive_offset, tmp->primitive_list, sizeof(primitive_t) * tmp->primitive_count);
#pragma omp parallel for
for(int j = primitive_offset; j < primitive_offset + tmp->primitive_count; j++) {
switch (rtn->primitive_list[j].type) {
case TRIANGLE:
rtn->primitive_list[j].data.triangle.A += vec_offset;
rtn->primitive_list[j].data.triangle.B += vec_offset;
rtn->primitive_list[j].data.triangle.C += vec_offset;
break;
case ELLIPSOID:
rtn->primitive_list[j].data.ellipsoid.origin += vec_offset;
rtn->primitive_list[j].data.ellipsoid.radius += vec_offset;
break;
case CUBOID:
rtn->primitive_list[j].data.cuboid.min += vec_offset;
rtn->primitive_list[j].data.cuboid.max += vec_offset;
break;
default:
break;
}
}
vec_offset += tmp->vec_count;
primitive_offset += tmp->primitive_count;
destroy_model(tmp);
}
rtn->basis.i = (vec_t){1.0f, 0.0f, 0.0f};
rtn->basis.j = (vec_t){0.0f, 1.0f, 0.0f};
rtn->basis.k = (vec_t){0.0f, 0.0f, 1.0f};
rtn->basis.l = (vec_t){0.0f, 0.0f, 0.0f};
return rtn;
}
model_t * copy_model(model_t * target) {
model_t * rtn = (model_t *) malloc(sizeof(model_t));
rtn->vec_count = target->vec_count;
rtn->vec_list = (vec_t *) malloc(sizeof(vec_t) * rtn->vec_count);
memcpy(rtn->vec_list, target->vec_list, sizeof(vec_t) * rtn->vec_count);
rtn->primitive_count = target->primitive_count;
rtn->primitive_list = (primitive_t *) malloc(sizeof(primitive_t) * rtn->primitive_count);
memcpy(rtn->primitive_list, target->primitive_list, sizeof(primitive_t) * rtn->primitive_count);
rtn->basis = target->basis;
return rtn;
}
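// transform_model applies the model's affine basis to every vertex in place:
// new = x*basis.i + y*basis.j + z*basis.k + basis.l, i.e. basis.i/j/k are the
// images of the unit axes and basis.l is the translation. The basis is then
// reset to identity so the same transform cannot be applied twice.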
void transform_model(model_t * target) {
#pragma omp parallel for
for(int i = 0; i < target->vec_count; i++) {
vec_t * old = target->vec_list +i;
vec_t new_vec = (vec_t){
(old->x * target->basis.i.x) + (old->y * target->basis.j.x) + (old->z * target->basis.k.x) + target->basis.l.x,
(old->x * target->basis.i.y) + (old->y * target->basis.j.y) + (old->z * target->basis.k.y) + target->basis.l.y,
(old->x * target->basis.i.z) + (old->y * target->basis.j.z) + (old->z * target->basis.k.z) + target->basis.l.z
};
*old = new_vec;
}
target->basis.i = (vec_t){1.0f, 0.0f, 0.0f};
target->basis.j = (vec_t){0.0f, 1.0f, 0.0f};
target->basis.k = (vec_t){0.0f, 0.0f, 1.0f};
target->basis.l = (vec_t){0.0f, 0.0f, 0.0f};
}
void destroy_model(model_t * model) {
if (model) {
free(model->vec_list);
free(model->primitive_list);
free(model);
}
}
void get_bounds_triangle(triangle_t * triangle, vec_t * vec_list, vec_t * p_min, vec_t * p_mid, vec_t * p_max) {
vec_t *A = vec_list + triangle->A;
vec_t *B = vec_list + triangle->B;
vec_t *C = vec_list + triangle->C;
// Determine min value of A, B, C
p_min->x = fminf(A->x, fminf(B->x, C->x));
p_min->y = fminf(A->y, fminf(B->y, C->y));
p_min->z = fminf(A->z, fminf(B->z, C->z));
// Determine max value of A, B, C
p_max->x = fmaxf(A->x, fmaxf(B->x, C->x));
p_max->y = fmaxf(A->y, fmaxf(B->y, C->y));
p_max->z = fmaxf(A->z, fmaxf(B->z, C->z));
// Determine mid value of A, B, C
p_mid->x = (A->x + B->x + C->x) / 3.0f;
p_mid->y = (A->y + B->y + C->y) / 3.0f;
p_mid->z = (A->z + B->z + C->z) / 3.0f;
}
void get_bounds_ellipsoid(ellipsoid_t * ellipsoid, vec_t * vec_list, vec_t * p_min, vec_t * p_mid, vec_t * p_max) {
vec_t * origin = vec_list + ellipsoid->origin;
vec_t * radius = vec_list + ellipsoid->radius;
*p_min = vec_sub(*origin, *radius);
*p_mid = *origin;
*p_max = vec_add(*origin, *radius);
}
void get_bounds_cuboid(cuboid_t * cuboid, vec_t * vec_list, vec_t * p_min, vec_t * p_mid, vec_t * p_max) {
*p_min = *(vec_list + cuboid->min);
*p_max = *(vec_list + cuboid->max);
*p_mid = vec_scale(vec_add(*p_min, *p_max), 0.5f);
}
void get_bounds_primitive(primitive_t * primitive, vec_t * vec_list, vec_t * p_min, vec_t * p_mid, vec_t * p_max) {
switch (primitive->type) {
case TRIANGLE:
get_bounds_triangle(&primitive->data.triangle, vec_list, p_min, p_mid, p_max);
break;
case ELLIPSOID:
get_bounds_ellipsoid(&primitive->data.ellipsoid, vec_list, p_min, p_mid, p_max);
break;
case CUBOID:
get_bounds_cuboid(&primitive->data.cuboid, vec_list, p_min, p_mid, p_max);
break;
default:
break;
}
}
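// Standard axis-aligned bounding box (AABB) test: two boxes intersect iff
// their intervals overlap on every axis. Example: A=[0,1]^3 overlaps
// B=[0.5,2]^3, but B'=[1.5,2]^3 fails the test on all three axes.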
int overlap_bounds(vec_t * A_min, vec_t * A_max, vec_t * B_min, vec_t * B_max) {
return (A_min->x <= B_max->x && A_max->x >= B_min->x) &&
(A_min->y <= B_max->y && A_max->y >= B_min->y) &&
(A_min->z <= B_max->z && A_max->z >= B_min->z);
}
bvh_t * create_bvh(int primitive_count, primitive_t * primitive_list, vec_t * vec_list, vec_t * smin, vec_t * smax, int limit, int depth) {
// Recursive end cases:
// If depth is zero, or there are no primitives, return NULL.
if (depth < 0) return NULL;
if (primitive_count <= 0) return NULL;
// Create the bvh that will be returned.
bvh_t * rtn = (bvh_t *) malloc(sizeof(bvh_t));
// If either smin or smax is NULL,
// then new min and max must be determined from the primitives.
// Otherwise, rtn's min and max will be smin and smax.
unsigned char bSetMinMax = (smin == NULL || smax == NULL);
if(bSetMinMax) {
rtn->min = (vec_t){INFINITY, INFINITY, INFINITY};
rtn->max = (vec_t){-INFINITY, -INFINITY, -INFINITY};
} else {
rtn->min = *smin;
rtn->max = *smax;
}
// rtn's middle point will be determined by the average
// of all the given primitive's middle points.
rtn->mid = (vec_t){0.0, 0.0, 0.0};
int vec_count = 0;
for(int i = 0; i < primitive_count; i++) {
// get_bounds_primitive will populate p_min, p_mid, and p_max
// with the bounds from the given primitive.
vec_t p_min, p_mid, p_max;
get_bounds_primitive(primitive_list + i, vec_list, &p_min, &p_mid, &p_max);
// Accumulate middles
rtn->mid.x += p_mid.x;
rtn->mid.y += p_mid.y;
rtn->mid.z += p_mid.z;
vec_count++;
// If bounds must be determined, check for new min and max.
if (bSetMinMax) {
if(rtn->min.x > p_min.x) rtn->min.x = p_min.x;
if(rtn->min.y > p_min.y) rtn->min.y = p_min.y;
if(rtn->min.z > p_min.z) rtn->min.z = p_min.z;
if(rtn->max.x < p_max.x) rtn->max.x = p_max.x;
if(rtn->max.y < p_max.y) rtn->max.y = p_max.y;
if(rtn->max.z < p_max.z) rtn->max.z = p_max.z;
}
}
// Average accumulated values,
// with quick check for divide by zero
if (vec_count != 0) {
rtn->mid.x /= (float)vec_count;
rtn->mid.y /= (float)vec_count;
rtn->mid.z /= (float)vec_count;
}
// If middle is outside of the min and max bounds,
// reset middle to be geometric middle of the bounds.
if((rtn->mid.x < rtn->min.x) || (rtn->mid.x > rtn->max.x) || (rtn->mid.y < rtn->min.y) || (rtn->mid.y > rtn->max.y) || (rtn->mid.z < rtn->min.z) || (rtn->mid.z > rtn->max.z)) {
rtn->mid.x = (rtn->min.x + rtn->max.x) * 0.5f;
rtn->mid.y = (rtn->min.y + rtn->max.y) * 0.5f;
rtn->mid.z = (rtn->min.z + rtn->max.z) * 0.5f;
}
// IF: more children are to be made:
if (primitive_count > limit && depth > 0) {
// Iterate through each octant of the BVH
for(int i = 0; i < 8; i++) {
// Select the min and max for the octant.
vec_t tmp_min = (vec_t){(i & 0x1) ? rtn->mid.x : rtn->min.x,
(i & 0x2) ? rtn->mid.y : rtn->min.y,
(i & 0x4) ? rtn->mid.z : rtn->min.z};
vec_t tmp_max = (vec_t){(i & 0x1) ? rtn->max.x : rtn->mid.x,
(i & 0x2) ? rtn->max.y : rtn->mid.y,
(i & 0x4) ? rtn->max.z : rtn->mid.z};
// Make a temporary list to store the primitives that are in the octant
primitive_t * temp_primitive_list = (primitive_t *) malloc(sizeof(primitive_t) * primitive_count);
// For each primitive, check if it is in the octant
#pragma omp parallel for
for(int j = 0; j < primitive_count; j++) {
vec_t p_min = (vec_t){0.0f, 0.0f, 0.0f};
vec_t p_mid = (vec_t){0.0f, 0.0f, 0.0f};
vec_t p_max = (vec_t){0.0f, 0.0f, 0.0f};
get_bounds_primitive(primitive_list + j, vec_list, &p_min, &p_mid, &p_max);
if (overlap_bounds(&tmp_min, &tmp_max, &p_min, &p_max)) {
temp_primitive_list[j] = primitive_list[j];
} else {
temp_primitive_list[j].type = INVALID;
}
}
// reduce the list, removing the invalid values
int count = 0;
for(int j = 0; j < primitive_count; j++) {
if (temp_primitive_list[j].type != INVALID) {
temp_primitive_list[count] = temp_primitive_list[j];
count++;
}
}
// Recursive call to create child bvh
rtn->children[i] = create_bvh(count, temp_primitive_list, vec_list, &tmp_min, &tmp_max, limit, depth-1);
// Cleanup temp list
free(temp_primitive_list);
}
// Check number of children
int children_count = 0;
for(int i = 0; i < 8; i++) {
if (rtn->children[i] != NULL) children_count++;
}
if (children_count != 0) {
rtn->children_count = children_count;
rtn->primitive_list = NULL;
rtn->primitive_count = 0;
return rtn;
// If no children, return NULL.
} else {
free(rtn);
return NULL;
}
// Else: No more children are to be made
} else {
rtn->children_count = 0;
for(int i = 0; i < 8; i++) rtn->children[i] = NULL;
rtn->primitive_list = (primitive_t *) malloc(sizeof(primitive_t) * primitive_count);
rtn->primitive_count = primitive_count;
memcpy(rtn->primitive_list, primitive_list, sizeof(primitive_t) * primitive_count);
return rtn;
}
}
// Wrapper function for creating BVH's from a model
bvh_t * build_bvh(model_t * model, int limit, int depth) {
return create_bvh(model->primitive_count, model->primitive_list, model->vec_list, NULL, NULL, limit, depth);
}
void destroy_bvh(bvh_t * bvh) {
if (bvh) {
if (bvh->children_count > 0) {
for(int i = 0; i < 8; i++) destroy_bvh(bvh->children[i]);
}
if (bvh->primitive_list) free(bvh->primitive_list);
free(bvh);
}
}
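/* Minimal usage sketch (file name and tuning values are hypothetical, not part
 * of the original sources):
 *   model_t *m = load_model("scene.obj");
 *   bvh_t *bvh = build_bvh(m, 16, 8); // split while >16 primitives and depth remains
 *   // ... traverse bvh for intersection queries ...
 *   destroy_bvh(bvh);
 *   destroy_model(m);
 */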
|
cf_cilk_tiny.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <immintrin.h> // _mm_malloc/_mm_free
//#include <omp.h>
#include <cilk/cilk.h>
#include <cilk/reducer_opadd.h>
#include <time.h>
#include <sys/time.h>
#define MICRO_IN_SEC 1000000.00
// item and user numbering is assumed to start from 1; if it starts from 0, the get_*_data routines must be modified
#define USER_COUNT 71567
//#define USER_COUNT 3500
#define ITEM_COUNT 65133
#define TEST_COUNT 2089384
#define RECORD_COUNT 7910670
#define ITEM_BLOCK 6000
//#define ITEM_BLOCK 384546
#define ITEM_ROUND (ITEM_COUNT/ITEM_BLOCK)
#define ITEM_LAST (ITEM_COUNT%ITEM_BLOCK)
#define USER_BLOCK 6000
#define USER_ROUND (USER_COUNT/USER_BLOCK)
#define USER_LAST (USER_COUNT%USER_BLOCK)
#define K_SORT 100
#define BLOCK_MINI 100
typedef struct record_item_struct_define
{
int item_id;
int user_id;
float rating;
}record_item_struct;
typedef struct k_similarity_struct_define
{
int k_index;
double k_similarity;
}k_similarity_struct;
double microtime(){
struct timeval tv;
struct timezone tz;
gettimeofday(&tv,&tz);
return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC;
}
float compute_multiply(float* rating_m_block, float* rating_n_block)
{
// Cilk Plus array-notation dot product over one BLOCK_MINI-sized slice
return __sec_reduce_add(rating_m_block[0:BLOCK_MINI] * rating_n_block[0:BLOCK_MINI]);
}
// Here, we assume that the item in {user,item,predict_rating} of the test dataset will cover all the items.
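/*
 * Pearson similarity between items m and n over users u:
 *   sim(m,n) = sum_u (r_mu - avg_m)(r_nu - avg_n)
 *              / ( sqrt(sum_u (r_mu - avg_m)^2) * sqrt(sum_u (r_nu - avg_n)^2) )
 * get_item_average_matrix() has already subtracted each item's average from
 * every slot of its rating block (unrated slots included), so the plain
 * dot-product sums in the loop below realize this mean-centered form.
 */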
void get_pearson_similarity(float* rating_m, float* rating_n, double* average_matrix, k_similarity_struct * k_similarity_matrix, int start_index_m, int start_index_n, int flag){
printf(" in pearson_similarity start_index_m=%d, start_index_n=%d\n", start_index_m, start_index_n);
int nPadded = ( USER_COUNT%8 == 0 ? USER_COUNT : USER_COUNT + (8-USER_COUNT%8) );
double similarity;
// int i,j,m,n,s,k;
int ua,ub,uc;
/*
double sum_numerator=0;
double sum_denominator_square_m=0;
double sum_denominator_square_n=0;
*/
double sum_denominator=0;
double* sum_numerator_matrix =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
double* sum_denominator_matrix_m =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
double* sum_denominator_matrix_n =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
memset(sum_numerator_matrix,0,sizeof(double)*USER_COUNT);
memset(sum_denominator_matrix_m,0,sizeof(double)*USER_COUNT);
memset(sum_denominator_matrix_n,0,sizeof(double)*USER_COUNT);
// float * simi_temp = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
int numthreads;
int item_index;
// int block_m,block_n;
int end_m=(ITEM_COUNT<(start_index_m+ITEM_BLOCK) ? ITEM_COUNT:(start_index_m+ITEM_BLOCK));
int end_n=(ITEM_COUNT<(start_index_n+ITEM_BLOCK) ? ITEM_COUNT:(start_index_n+ITEM_BLOCK));
//printf("the number of threads is %d\n", omp_get_num_threads());
int block_mini_int = BLOCK_MINI;
printf(" end_m = %d , end_n = %d,...BLOCK_MINI = %d\n",end_m,end_n,block_mini_int);
/*
cilk::reducer_opadd<unsigned long> kk(0);
cilk_for (int k=0; k<1000; k++)
{
kk+=k;
}
*/
double a = microtime();
//compute the pearson similarity
//#pragma omp parallel for collapse(2) private(i,j,k,m,n) reduction(+:sum_numerator,sum_denominator_square_m,sum_denominator_square_n)
//#pragma omp parallel for collapse(2) private(i,j,k,m,n)
//#pragma omp parallel
// #pragma cilk grainsize = ((end_m-start_index_m)*(end_n-start_index_n))/244
// cilk_for (unsigned int mn = 0; mn < (((end_m-start_index_m)*(end_n-start_index_n))/16)*16; mn++)
cilk_for (unsigned int mn = 0; mn < (end_m-start_index_m)*(end_n-start_index_n); mn++)
{
// if (m%100==0) printf ("m = %d, percent= %f/%\n",m,(float)m/ITEM_COUNT*100);
// for(int n=start_index_n; n< end_n; n++)
// {
int m = start_index_m + mn / ( end_n - start_index_n );
int n = start_index_n + mn % ( end_n - start_index_n );
//float* block_m = rating_m + (m - start_index_m)*USER_COUNT;
//float* block_n = rating_n + (n - start_index_n)*USER_COUNT;
int block_m = m - start_index_m;
int block_n = n - start_index_n;
//similarity = cilk_spawn compute_kernel(rating_m,rating_n,block_m,block_n);
// int block_m=1,block_n=1;
float sum_numerator=0;
float sum_denominator_square_m=0;
float sum_denominator_square_n=0;
// cilk::reducer_opadd<float> sum_numerator(0);
// cilk::reducer_opadd<float> sum_denominator_square_m(0);
// cilk::reducer_opadd<float> sum_denominator_square_n(0);
// cilk::reducer_opadd<double> sum_denominator(0);
float * rating_m_block = rating_m+block_m*USER_COUNT;
float * rating_n_block = rating_n+block_n*USER_COUNT;
//#pragma omp for schedule(static) nowait
// #pragma omp parallel for private(i)
// #pragma simd reduction(+:sum_numerator,sum_denominator_square_m,sum_denominator_square_n)
// #pragma vector aligned
// #pragma simd vecremainder vectorlength(16) private(sum_numerator,sum_denominator_square_m,sum_denominator_square_n)
// #pragma simd vecremainder
// #pragma vector aligned
// #pragma ivdep
// #pragma vector always
// #pragma cilk grainsize=(USER_COUNT/1)
// for (int i=0;i < USER_COUNT/BLOCK_MINI; i++)
for (int i=0;i < USER_COUNT; i++)
{
//compute numerator
/* // simi_temp[block_m*USER_COUNT+i] = rating_m[block_m*USER_COUNT+i];
float sum_numerator_tmp = cilk_spawn compute_multiply(rating_m_block + i*BLOCK_MINI, rating_n_block + i*BLOCK_MINI);
sum_numerator += sum_numerator_tmp;
float sum_denominator_square_m_tmp = cilk_spawn compute_multiply(rating_m_block + i*BLOCK_MINI, rating_m_block + i*BLOCK_MINI);
sum_denominator_square_m += sum_denominator_square_m_tmp;
float sum_denominator_square_n_tmp = cilk_spawn compute_multiply(rating_n_block + i*BLOCK_MINI, rating_n_block + i*BLOCK_MINI);
sum_denominator_square_n += sum_denominator_square_n_tmp;
*/
sum_numerator += (rating_m[block_m*USER_COUNT+i])*(rating_n[block_n*USER_COUNT+i]);
//compute the square in the denominator
// sum_denominator_square_m += powf(rating_m[block_m*USER_COUNT+i],2.0);
sum_denominator_square_m += rating_m[block_m*USER_COUNT+i]*rating_m[block_m*USER_COUNT+i];
// sum_denominator_square_n += powf(rating_n[block_n*USER_COUNT+i],2.0);
sum_denominator_square_n += rating_n[block_n*USER_COUNT+i]*rating_n[block_n*USER_COUNT+i];
// if( m==100 && n==100)
// printf("m=%d,n=%d,i=%d, running on thread %d\n",m,n,i, omp_get_thread_num());
}
/*
float * rating_m_block = rating_m+block_m*USER_COUNT;
float * rating_n_block = rating_n+block_n*USER_COUNT;
// #pragma vector aligned (rating_m_block,rating_n_block)
float sum_numerator = __sec_reduce_add(rating_m_block[0:USER_COUNT] * rating_n_block[0:USER_COUNT]);
// float sum_numerator = cilk_spawn compute_numerator(rating_m_block, rating_n_block);
// #pragma vector aligned (rating_m_block)
float sum_denominator_square_m = __sec_reduce_add(rating_m_block[0:USER_COUNT]*rating_m_block[0:USER_COUNT]);
// #pragma vector aligned (rating_n_block)
float sum_denominator_square_n = __sec_reduce_add(rating_n_block[0:USER_COUNT]*rating_n_block[0:USER_COUNT]);
*/
//compute the denominator (kept loop-local: the function-scope similarity and
//sum_denominator would be racy if shared across cilk_for iterations)
double sum_denominator = sqrt (sum_denominator_square_m*sum_denominator_square_n);
double similarity;
if(sum_denominator!=0)
similarity = sum_numerator/sum_denominator;
else
similarity = 0;
(void)similarity; // consumed by the k-nearest insertion sort, currently commented out below
/*
for (j=0;j<K_SORT;j++)
{
item_index = k_similarity_matrix[m*K_SORT+j].k_index;
if(item_index==-1 || similarity > k_similarity_matrix[m*K_SORT+j].k_similarity)
{
for (s=K_SORT-1;s>j;s--)
{
k_similarity_matrix[m*K_SORT+s].k_index = k_similarity_matrix[m*K_SORT+s-1].k_index;
k_similarity_matrix[m*K_SORT+s].k_similarity = k_similarity_matrix[m*K_SORT+s-1].k_similarity;
}
k_similarity_matrix[m*K_SORT+j].k_index = n;
k_similarity_matrix[m*K_SORT+j].k_similarity = similarity;
break;
}
else if( similarity == k_similarity_matrix[m*K_SORT+j].k_similarity && item_index == n)
{
break;
}
}
if(flag==0) continue;
for (k=0;k<K_SORT;k++)
{
item_index = k_similarity_matrix[n*K_SORT+k].k_index;
if(item_index==-1 || similarity > k_similarity_matrix[n*K_SORT+k].k_similarity)
{
for (s=K_SORT-1;s>j;s--)
{
k_similarity_matrix[n*K_SORT+s].k_index = k_similarity_matrix[n*K_SORT+s-1].k_index;
k_similarity_matrix[n*K_SORT+s].k_similarity = k_similarity_matrix[n*K_SORT+s-1].k_similarity;
}
k_similarity_matrix[n*K_SORT+k].k_index = m;
k_similarity_matrix[n*K_SORT+k].k_similarity = similarity;
break;
}
else if( similarity == k_similarity_matrix[n*K_SORT+k].k_similarity && item_index == m)
{
break;
}
}
*/
// }
}
double b = microtime();
double duration = b-a;
printf(" time consumed: %fs\n", duration);
}
int get_predict_rating(double* predict_rating, float* rating, k_similarity_struct* index_matrix, int* test, double* average_index, int user_start_index,int test_start_index)
{
// Firstly, we should find the union set between rating&index_matrix for the users in test[];
/*
printf(" in predict_rating ...........user_start_index=%d, test_start_index=%d\n", user_start_index, test_start_index);
int m,n,i,j;
int user_id,item_id,k_id;
double sum_rating = 0;
int sum_rating_index = 0;
int sum_no_rating_index=0;
double numerator = 0;
double denominator = 0;
int block_user_id;
// #pragma omp parallel for private(m,i) reduction(+:numerator,denominator)
for (m = test_start_index; m < TEST_COUNT; m++)
{
user_id = test[m*2+0];
item_id = test[m*2+1];
numerator =0;
denominator = 0;
if( user_id < user_start_index) printf(" error__________________+++++++++++++++\n");
if( user_id > ((user_start_index+USER_BLOCK) > USER_COUNT ? USER_COUNT:(user_start_index+USER_BLOCK))) break;
block_user_id = user_id - user_start_index;
for (i = 0; i < K_SORT; i++)
{
k_id = index_matrix[item_id*K_SORT+i].k_index;
if ( rating[block_user_id*ITEM_COUNT+k_id] !=0 )
{
numerator += index_matrix[item_id*K_SORT+i].k_similarity*(rating[block_user_id*ITEM_COUNT+k_id]-average_index[k_id]);
denominator += index_matrix[item_id*K_SORT+i].k_similarity;
}
}
if(denominator !=0)
predict_rating[m] = average_index[item_id] + numerator/ denominator;
else
predict_rating[m] = average_index[item_id];
}
// return predict_rating;
return m;
*/
// stub: the full prediction pass above is commented out; report all tests
// consumed so the caller's round loop terminates instead of never advancing
return TEST_COUNT;
}
void print_matrix_double(double * matrix,int row,int column)
{
/*
int i,j;
int sum_0=0;
for (i=0;i<row;i++)
{
for(j=0;j<column;j++)
if (matrix[i*column+j]==0) sum_0++;
// printf("%lf ",matrix[i*column+j]);
// printf("\n");
}
printf("sum_0 is %d in a whole %d\n",sum_0,row);
*/
}
void get_item_data(record_item_struct* item_data, char* filename)
{
FILE *fp;
if ((fp=fopen(filename,"r")) == NULL)
{
printf("cannot open this file");
exit(0);
}
int user_id, item_id, timestamp;
float rating=0;
int i=0;
while (fscanf(fp,"%d %d %f %d", &item_id, &user_id, &rating, &timestamp) != EOF)
{
item_data[i].item_id = item_id;
item_data[i].user_id = user_id;
item_data[i].rating = rating;
i++;
}
fclose(fp);
}
int get_item_block_data(int round, float* data, int file_start_index, record_item_struct* item_data)
{
// int i=0;
int p=0;
// #pragma omp parallel for
cilk_for (int i = 0; i<ITEM_BLOCK*USER_COUNT;i++)
{
data[i]=0;
}
// memset(data, 0, sizeof(float)*ITEM_BLOCK*USER_COUNT);
int user_id, item_id;
float rating=0;
for(int i=file_start_index; i<RECORD_COUNT; i++)
{
item_id = item_data[i].item_id;
user_id = item_data[i].user_id;
rating = item_data[i].rating;
if ((item_id-1) >= (round+1)*ITEM_BLOCK) break;
data[(item_id-1-(round*ITEM_BLOCK))*USER_COUNT + user_id-1] = rating;
p++;
}
return file_start_index + p; // index of the first record not consumed by this block
}
long get_user_block_data(int round, float* data, long file_start_index)
{
FILE *fp;
int i=0;
//int large_user_id=0;
//int large_item_id=0;
/*
float * data = (float*)malloc(sizeof(float)*USER_BLOCK*ITEM_COUNT);
if (data==NULL)
{
printf(" malloc of base_data failed\n");
exit(1);
}
*/
memset(data, 0, sizeof(float)*USER_BLOCK*ITEM_COUNT);
if ((fp=fopen("r1.train.raw","r")) == NULL)
{
printf("cannot open this file");
exit(0);
}
int user_id, item_id, timestamp;
float rating=0;
long file_offset = 0;
fseek(fp, file_start_index, 0);
while (fscanf(fp,"%d %d %f %d", &user_id, &item_id, &rating, &timestamp) != EOF)
{
if ((user_id-1) < round*USER_BLOCK) continue;
if ((user_id-1) >= (round+1)*USER_BLOCK) break;
data[(user_id-1-(round*USER_BLOCK))*ITEM_COUNT + item_id-1] = rating;
file_offset = ftell(fp);
// data[(user_id-1)*ITEM_COUNT + item_id-1] = rating;
// if (user_id > large_user_id) large_user_id = user_id;
// if (item_id > large_item_id) large_item_id = item_id;
// printf("getting base_data on line i=%d,user_id=%d,item_id=%d,rating=%f\n",i++,user_id,item_id,rating);
}
// printf("the largest user_id is %d\n the largest item_id is %d\n",large_user_id,large_item_id);
fclose(fp);
return file_offset;
// return data;
}
void get_test_data(int* data, float* rating)
{
FILE *fp;
int i=0;
memset(data, 0, sizeof(int)*2*TEST_COUNT);
if ((fp=fopen("r1.test","r")) == NULL)
{
printf("cannot open this file");
exit(0);
}
int user_id, item_id, timestamp;
float rating_temp;
while (fscanf(fp,"%d %d %f %d", &user_id, &item_id, &rating_temp, &timestamp) != EOF)
{
data[i*2+0] = user_id-1;
data[i*2+1] = item_id-1;
rating[i] = rating_temp;
i++;
//printf("getting test_data on line i=%d,user_id=%d,item_id=%d,rating_temp=%f\n",i,user_id,item_id,rating_temp);
}
fclose(fp);
}
double get_rmse(float* test_rating, double* predict_data)
{
/*
int m,n,i,j;
double numerator = 0;
double denominator = TEST_COUNT;
#pragma omp parallel for private(i) reduction(+:numerator)
for(i=0;i<TEST_COUNT;i++)
{
numerator += pow ((test_rating[i] - predict_data[i]),2.0);
}
return (numerator/denominator);
*/
return 0;
}
void get_item_average_matrix(float* rating,double* average_matrix, int start_index)
{
// int m,n,i,j;
// int average_index =0;
// int average_sum = 0;
// int block_m = 0;
double average_item=0;
// #pragma omp parallel for private(m,n) reduction(+:average_sum,average_index)
cilk_for (int m = start_index; m<(ITEM_COUNT < (start_index+ITEM_BLOCK) ? ITEM_COUNT:(start_index+ITEM_BLOCK)); m++ )
{
cilk::reducer_opadd<int> average_sum(0);
cilk::reducer_opadd<int> average_index(0);
int block_m = m-start_index;
#pragma simd
#pragma vector aligned
#pragma vector always
#pragma ivdep
for(int n=0;n<USER_COUNT;n++)
{
if(rating[block_m*USER_COUNT+n] !=0)
{
average_sum += rating[block_m*USER_COUNT+n];
average_index += 1;
}
}
if(average_index.get_value()!=0)
{
average_matrix[m]=(double)average_sum.get_value()/(double)average_index.get_value();
}
else
{
average_matrix[m]=0;
}
}
// #pragma omp parallel for private(m,n)
// #pragma vector aligned
// #pragma ivdep
cilk_for (int m = start_index; m<(ITEM_COUNT < (start_index+ITEM_BLOCK) ? ITEM_COUNT:(start_index+ITEM_BLOCK)); m++ )
{
int block_m = m-start_index;
average_item = average_matrix[m];
#pragma simd
for(int n=0;n<USER_COUNT;n++)
{
rating[block_m*USER_COUNT+n] -= (float)average_item;
}
}
}
int main(int argc, char ** argv){
//first, read the data in files into an array in order to process it more efficiently.
record_item_struct * item_data = (record_item_struct *) _mm_malloc(sizeof(record_item_struct)*RECORD_COUNT,64);
memset(item_data, 0, sizeof(record_item_struct)*RECORD_COUNT);
get_item_data(item_data, argv[1]);
float * item_block_data_i = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
float * item_block_data_j = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
if (item_block_data_i==NULL || item_block_data_j == NULL)
{
printf(" malloc of base_data failed\n");
exit(1);
}
double * item_average_matrix = (double*)_mm_malloc(sizeof(double)*ITEM_COUNT,64);
memset(item_average_matrix,0,sizeof(double)*ITEM_COUNT);
k_similarity_struct * item_index_matrix = (k_similarity_struct *) _mm_malloc (sizeof(k_similarity_struct)*K_SORT*ITEM_COUNT,64);
memset (item_index_matrix,-1,sizeof(k_similarity_struct)*K_SORT*ITEM_COUNT); // all bytes 0xFF => k_index == -1 sentinels
int item_start_index_i=0;
int item_start_index_j=0;
// int i,j;
for (int i=0; i <= ITEM_ROUND; i++)
{
printf("round %d ================== with ITEM_BLOCK %d\n",i,ITEM_BLOCK);
//block_data
printf("get item_block_data begins\n");
item_start_index_i = get_item_block_data(i, item_block_data_i,item_start_index_i, item_data);
printf("get_item_block_data ends\n");
//average matrix
printf("get_item_average_matrix begins\n");
get_item_average_matrix(item_block_data_i, item_average_matrix,i*ITEM_BLOCK);
printf("get_item_average_matrix ends\n");
item_start_index_j = 0;
for(int j=0; j<= i;j++)
{
if( i==j)
{
//the index of item after sorting the similarity matrix
printf("get k_similarity_begins\n");
get_pearson_similarity(item_block_data_i,item_block_data_i,item_average_matrix,item_index_matrix,i*ITEM_BLOCK,i*ITEM_BLOCK,0);
printf("get k_similarity_ends\n");
continue;
}
//block_data
printf("get item_block_data begins\n");
item_start_index_j = get_item_block_data(j, item_block_data_j, item_start_index_j, item_data);
printf("get_item_block_data ends\n");
//the index of item after sorting the similarity matrix
printf("get k_similarity_begins\n");
get_pearson_similarity(item_block_data_i,item_block_data_j,item_average_matrix,item_index_matrix,i*ITEM_BLOCK,j*ITEM_BLOCK,1);
printf("get k_similarity_ends\n");
}
}
_mm_free(item_block_data_i);
_mm_free(item_block_data_j);
int *test_data;
float *test_rating;
test_data = (int*)_mm_malloc (sizeof(int)*2*TEST_COUNT,64);
test_rating= (float*)_mm_malloc(sizeof(float)*TEST_COUNT,64);
printf("get_test_data begins\n");
get_test_data(test_data,test_rating);
printf("get_test_data ends\n");
long user_file_start_index = 0;
float * user_block_data = (float*)_mm_malloc(sizeof(float)*USER_BLOCK*ITEM_COUNT,64);
if (user_block_data==NULL)
{
printf(" malloc of base_data failed\n");
exit(1);
}
int test_start_index = 0;
double * item_predict_rating = (double*)_mm_malloc (sizeof(double)*TEST_COUNT,64);
for(int i=0;i<=USER_ROUND;i++)
{
user_file_start_index = get_user_block_data(i,user_block_data, user_file_start_index);
printf("get_predict_rating begins\n");
test_start_index=get_predict_rating(item_predict_rating, user_block_data, item_index_matrix, test_data,item_average_matrix,i*USER_BLOCK, test_start_index);
printf("get_predict_rating ends\n");
if ( test_start_index == TEST_COUNT)
break;
}
_mm_free (user_block_data);
double rmse;
printf("get_rmse begins\n");
rmse = get_rmse(test_rating,item_predict_rating);
printf("ge_rmse ends\n");
printf("rmse= %f\n", rmse);
return 0;
}
|
GB_binop__times_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__times_int32
// A.*B function (eWiseMult): GB_AemultB__times_int32
// A*D function (colscale): GB_AxD__times_int32
// D*A function (rowscale): GB_DxB__times_int32
// C+=B function (dense accum): GB_Cdense_accumB__times_int32
// C+=b function (dense accum): GB_Cdense_accumb__times_int32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_int32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_int32
// C=scalar+B GB_bind1st__times_int32
// C=scalar+B' GB_bind1st_tran__times_int32
// C=A+scalar GB_bind2nd__times_int32
// C=A'+scalar GB_bind2nd_tran__times_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x * y) ;
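// e.g. GB_BINOP (GB_CX (p), aij, bij) expands to: Cx [p] = (aij * bij) ;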
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT32 || GxB_NO_TIMES_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__times_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__times_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__times_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__times_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__times_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__times_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__times_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__times_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__times_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t bij = Bx [p] ;
Cx [p] = (x * bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__times_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
Cx [p] = (aij * y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB_bind1st_tran__times_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB_bind2nd_tran__times_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <time.h>
#include <assert.h>
#include <stdint.h> // uint32_t/uint64_t used by the cycle-count helpers
#include <omp.h>
#if defined(recursive)
#define clz(x) clz2(x,0)
#endif
#if defined(overload)
#include "clz.hpp"
#else
#include "clz.h"
#endif
static inline __attribute__((always_inline))
void get_cycles(unsigned *high, unsigned *low)
{
asm volatile ("CPUID\n\t"
"RDTSC\n\t"
"mov %%edx, %0\n\t"
"movl %%eax, %1\n\t": "=r" (*high), "=r" (*low)::"%rax","%rbx","%rcx","%rdx"
);
}
static inline __attribute__((always_inline))
void get_cycles_end(unsigned *high, unsigned *low)
{
asm volatile("RDTSCP\n\t"
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"CPUID\n\t": "=r" (*high), "=r" (*low)::"%rax","%rbx","%rcx","%rdx"
);
}
static inline __attribute__((always_inline))
uint64_t diff_in_cycles(unsigned high1, unsigned low1,
unsigned high2, unsigned low2)
{
uint64_t start,end;
start = (((uint64_t) high1 << 32) | low1);
end = (((uint64_t) high2 << 32) | low2);
return end - start;
}
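// Measurement pattern: CPUID serializes the pipeline before RDTSC starts the
// count, and RDTSCP followed by CPUID fences the end, so out-of-order
// execution cannot leak instructions across the timed region (cf. Intel's
// "How to Benchmark Code Execution Times" whitepaper).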
static inline __attribute__((unused))
double diff_in_second(struct timespec t1, struct timespec t2)
{
struct timespec diff;
if (t2.tv_nsec-t1.tv_nsec < 0) {
diff.tv_sec = t2.tv_sec - t1.tv_sec - 1;
diff.tv_nsec = t2.tv_nsec - t1.tv_nsec + 1000000000;
} else {
diff.tv_sec = t2.tv_sec - t1.tv_sec;
diff.tv_nsec = t2.tv_nsec - t1.tv_nsec;
}
return (diff.tv_sec + diff.tv_nsec / 1000000000.0);
}
int main(int argc, char *argv[])
{
FILE *output = NULL;
uint64_t timec, time_all = 0;
unsigned timec_high1, timec_low1, timec_high2, timec_low2;
#if defined(correct)
for (int try = 0; try < 20; try++) {
timec = 0;
get_cycles(&timec_high1, &timec_low1);
for (uint32_t i = 0; i < 31; i++) {
printf("%u:%d \n",1<<i,clz(1<<i));
for (uint32_t j = (1 << i); j < (1 << (i + 1)); j++) {
assert( __builtin_clz (j) == clz(j));
}
}
get_cycles_end(&timec_high2, &timec_low2);
timec = diff_in_cycles(timec_high1, timec_low1, timec_high2, timec_low2);
printf("executiom time : %lu cycles\n", timec);
}
#else
assert(argv[1] && argv[2] && "insert argument");
unsigned int min = atoi(argv[1]);
unsigned int max = atoi(argv[2]);
#if defined(recursive)
output = fopen("recursive.txt","a");
#elif defined(iteration)
output = fopen("iteration.txt","a");
#elif defined(byte)
output = fopen("byte.txt","a");
#elif defined(binary)
output = fopen("binary.txt","a");
#elif defined(harley)
output = fopen("harley.txt","a");
#elif defined(overload)
output = fopen("overload.txt","a");
#endif
uint64_t timecall;
for (uint32_t i = min; i < max; i++) {
timecall = 0;
#ifdef MP
#pragma omp parallel for private(timec_high1, timec_low1, timec_high2, timec_low2, timec) reduction(+:timecall)
#endif
for (uint32_t j = 0; j < 100; j++) {
get_cycles(&timec_high1, &timec_low1);
clz(i);
get_cycles_end(&timec_high2, &timec_low2);
timec = diff_in_cycles(timec_high1, timec_low1, timec_high2, timec_low2);
timecall += timec;
}
time_all += (timecall / 100);
fprintf(output, "%d %lu cycles\n", i, timecall / 100);
printf("%d %lu cycles\n", i, timecall / 100);
}
fclose(output);
printf("took %lf million cycles\n", time_all / 1000000.0);
#endif
return 0;
}
|
critical_dead.c | int counter = 0;
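// Presumably a test input for deadlock/termination analysis: foo() recurses
// unconditionally after leaving the critical section, so it never returns
// (counter saturates at 100 while the recursion continues until the stack
// overflows). Re-enabling the commented-out foo() call inside the critical
// block would try to re-enter the same unnamed critical region (deadlock),
// and the commented-out return would branch out of a structured block, which
// OpenMP forbids.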
void foo()
{
#pragma omp critical
{
if (counter <100)
counter++;
//else
// return;
// foo();
}
foo();
}
int main()
{
foo();
return 0;
}
|
csr_matvec.c | /*BHEADER**********************************************************************
* Copyright (c) 2006 The Regents of the University of California.
* Produced at the Lawrence Livermore National Laboratory.
* Written by the HYPRE team. UCRL-CODE-222953.
* All rights reserved.
*
* This file is part of HYPRE (see http://www.llnl.gov/CASC/hypre/).
* Please see the COPYRIGHT_and_LICENSE file for the copyright notice,
* disclaimer, contact information and the GNU Lesser General Public License.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License (as published by the Free Software
* Foundation) version 2.1 dated February 1999.
*
* HYPRE is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 2.10 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "headers.h"
#include <assert.h>
//#include "omp.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
int *A_rownnz = hypre_CSRMatrixRownnz(A);
int num_rownnz = hypre_CSRMatrixNumRownnz(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
int num_vectors = hypre_VectorNumVectors(x);
int idxstride_y = hypre_VectorIndexStride(y);
int vecstride_y = hypre_VectorVectorStride(y);
int idxstride_x = hypre_VectorIndexStride(x);
int vecstride_x = hypre_VectorVectorStride(x);
double temp, tempx;
int i, j, jj;
int m;
double xpar=0.7;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
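/* Scaling y by beta/alpha here and by alpha once at the very end gives
 * y = alpha*(A*x + (beta/alpha)*y) = alpha*A*x + beta*y, so the A*x
 * accumulation loops below need no per-element multiply by alpha. */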
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
/* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */
if (num_rownnz < xpar*(num_rows))
{
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
/*
* for (jj = A_i[m]; jj < A_i[m+1]; jj++)
* {
* j = A_j[jj];
* y_data[m] += A_data[jj] * x_data[j];
* } */
if ( num_vectors==1 )
{
tempx = y_data[m];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[A_j[jj]];
y_data[m] = tempx;
}
else
for ( j=0; j<num_vectors; ++j )
{
tempx = y_data[ j*vecstride_y + m*idxstride_y ];
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
y_data[ j*vecstride_y + m*idxstride_y] = tempx;
}
}
}
else
{
#pragma omp parallel for private(i,j,jj,temp) schedule(static)
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
else
for ( j=0; j<num_vectors; ++j )
{
temp = y_data[ j*vecstride_y + i*idxstride_y ];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
temp += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
}
y_data[ j*vecstride_y + i*idxstride_y ] = temp;
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}
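/*--------------------------------------------------------------------------
* Illustrative usage sketch (not part of hypre): given an already assembled
* CSR matrix A and vectors x and y of compatible sizes, a caller computes
* y <- 2.0*A*x + 0.5*y and inspects the informational return code roughly
* like this (assembly of A, x and y is assumed to happen elsewhere):
*
*    int ierr = hypre_CSRMatrixMatvec(2.0, A, x, 0.5, y);
*    if (ierr == 1) printf("length of x != num_cols of A\n");
*    if (ierr == 2) printf("length of y != num_rows of A\n");
*    if (ierr == 3) printf("both size mismatches\n");
*--------------------------------------------------------------------------*/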
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvecT( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
int num_vectors = hypre_VectorNumVectors(x);
int idxstride_y = hypre_VectorIndexStride(y);
int vecstride_y = hypre_VectorVectorStride(y);
int idxstride_x = hypre_VectorIndexStride(x);
int vecstride_x = hypre_VectorVectorStride(x);
double temp;
int i, i1, j, jv, jj, ns, ne, size, rest;
int num_threads;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
ierr = 1;
if (num_cols != y_size)
ierr = 2;
if (num_rows != x_size && num_cols != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
for (i1 = 0; i1 < num_threads; i1++)
{
size = num_cols/num_threads;
rest = num_cols - size*num_threads;
if (i1 < rest)
{
ns = i1*size+i1-1;
ne = (i1+1)*size+i1+1;
}
else
{
ns = i1*size+rest-1;
ne = (i1+1)*size+rest;
}
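/* chunk i1 covers the open column interval (ns, ne); the first `rest`
   chunks get size+1 columns and the remaining chunks get size columns,
   so the num_cols columns are covered exactly once */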
if ( num_vectors==1 )
{
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
if (j > ns && j < ne)
y_data[j] += A_data[jj] * x_data[i];
}
}
}
else
{
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
if (j > ns && j < ne)
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
}
}
else
{
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= alpha;
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*
* Performs y <- alpha * A * x + beta * y, restricted to the rows i with
* CF_marker_x[i] == fpt and to the matrix entries whose column index also
* has CF_marker_y equal to fpt (e.g. the fine-fine part of the operator).
*--------------------------------------------------------------------------*/
int
hypre_CSRMatrixMatvec_FF( double alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
double beta,
hypre_Vector *y,
int *CF_marker_x,
int *CF_marker_y,
int fpt )
{
double *A_data = hypre_CSRMatrixData(A);
int *A_i = hypre_CSRMatrixI(A);
int *A_j = hypre_CSRMatrixJ(A);
int num_rows = hypre_CSRMatrixNumRows(A);
int num_cols = hypre_CSRMatrixNumCols(A);
double *x_data = hypre_VectorData(x);
double *y_data = hypre_VectorData(y);
int x_size = hypre_VectorSize(x);
int y_size = hypre_VectorSize(y);
double temp;
int i, jj;
int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
}
return ierr;
}
|
nlk_pv.c | /******************************************************************************
* NLK - Neural Language Kit
*
* Copyright (c) 2014 Luis Rei <me@luisrei.com> http://luisrei.com @lmrei
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*****************************************************************************/
/** @file nlk_pv.c
* Paragraph Vector Inference functions
* @note: code related to training a PV model is in nlk_w2v.c
*/
#include <errno.h>
#include <stdio.h>
#include <omp.h>
#include "nlk_err.h"
#include "nlk_array.h"
#include "nlk_tic.h"
#include "nlk_text.h"
#include "nlk_corpus.h"
#include "nlk_random.h"
#include "nlk_w2v.h"
#include "nlk_learn_rate.h"
#include "nlk_pv.h"
/**
* Display progress of paragraph vector generation
*
* @param generated number of PVs generated so far
* @param total total number of PVs to generate
*/
static void
nlk_pv_display(const size_t generated, const size_t total)
{
char display_str[64];
float progress = (generated / (float) total) * 100;
snprintf(display_str, 64,
"Progress: %.2f%% (%zu/%zu) Threads: %d\t",
progress, generated, total, omp_get_num_threads());
nlk_tic(display_str, false);
}
/**
* Sets Paragraph Vector Inference Mode (i.e. PV generation).
* In this mode, gradients are only propagated into the Paragraphs Table and
* not into the Words Table or the second (HS/NEG) layers.
*
* @param nn the neural network structure
*/
void
nlk_pv_inference_mode(struct nlk_neuralnet_t *nn)
{
/* Prevent Layer Weights From Changing: Words & HS/Neg */
nn->words->update = false;
if(nn->train_opts.hs) {
nn->hs->update = false;
}
if(nn->train_opts.negative) {
nn->neg->update = false;
}
}
/**
* Disables Paragraph Vector Inference Mode (i.e. PV generation)
* In learn mode, gradients are backpropagated to all layers.
*
* @param nn the neural network structure
*/
void
nlk_pv_learn_mode(struct nlk_neuralnet_t *nn)
{
/* Allow Layer Weights to Change: Words & HS/Neg */
nn->words->update = true;
if(nn->train_opts.hs) {
nn->hs->update = true;
}
if(nn->train_opts.negative) {
nn->neg->update = true;
}
}
/**
* Infer Paragraph Vector for a line
* This function does the actual inference work for
* nlk_pv_gen and nlk_pv_gen_string.
*
* @param nn the neural network structure
* @param line the line structure holding the input
* @param epochs the number of epochs or iterations to generate a PV
* @param paragraphs the paragraph table that will be updated (output)
* @param line_sample will hold sampled line for each epoch
* @param contexts will hold the contexts generated for each epoch
* @param grad_acc for accumulating gradients
* @param layer1_out for holding the output of the first layer
*/
inline static void
nlk_pv_gen_line(struct nlk_neuralnet_t *nn,
struct nlk_line_t *line,
const unsigned int epochs,
struct nlk_layer_lookup_t *paragraphs,
struct nlk_line_t *line_sample,
struct nlk_context_t **contexts, NLK_ARRAY *grad_acc,
NLK_ARRAY *layer1_out)
{
NLK_LM model_type = nn->train_opts.model_type;
nlk_real learn_rate = nn->train_opts.learn_rate;
const nlk_real learn_rate_start = nn->train_opts.learn_rate;
uint64_t train_words = nn->train_opts.word_count;
const float sample_rate = nn->train_opts.sample;
/* Generate PV for this line */
unsigned int n_examples;
unsigned int ex;
uint64_t line_words;
uint64_t word_count_actual = 0;
line_words = line->len;
/** @section Generate Contexts Update Vector Loop
*/
unsigned int local_epoch = 0;
for(local_epoch = 0; local_epoch < epochs; local_epoch++) {
word_count_actual += line_words;
/* subsample line */
nlk_vocab_line_subsample(line, train_words, sample_rate,
line_sample);
/* single word, nothing to do ... */
if(line_sample->len < 2) {
continue;
}
/* generate contexts */
n_examples = nlk_context_window(line_sample->varray,
line_sample->len,
line_sample->line_id,
&nn->context_opts,
contexts);
/** @subsection Update the Paragraph Vector with these contexts
*/
switch(model_type) {
case NLK_PVDBOW:
for(ex = 0; ex < n_examples; ex++) {
nlk_pvdbow(nn, paragraphs, learn_rate,
contexts[ex], grad_acc, layer1_out);
}
break;
case NLK_PVDM:
for(ex = 0; ex < n_examples; ex++) {
nlk_pvdm(nn, paragraphs, learn_rate, contexts[ex],
grad_acc, layer1_out);
}
break;
case NLK_PVDM_CONCAT:
for(ex = 0; ex < n_examples; ex++) {
nlk_pvdm_cc(nn, paragraphs, learn_rate, contexts[ex],
grad_acc, layer1_out);
}
break;
case NLK_MODEL_NULL:
case NLK_CBOW:
case NLK_CBOW_SUM:
case NLK_SKIPGRAM:
default:
NLK_ERROR_ABORT("invalid model type", NLK_EINVAL);
/* unreachable */
} /* end of model switch */
learn_rate = nlk_learn_rate_w2v(learn_rate, learn_rate_start,
epochs, word_count_actual,
line_words);
} /* end of contexts: pv has been generated */
}
/**
* Paragraph Vector Inference
* Creates a new Paragraph Table and uses the neural network and the corpus
* to generate a PV for each line in the corpus.
*
* @param nn the neural network structure
* @param corpus the input corpus
* @param epochs the number of epochs or iterations to generate a PV
* @param verbose display progress and other useful information
*
* @return a paragraph table (lookup layer) with the PVs generated
*/
struct nlk_layer_lookup_t *
nlk_pv_gen(struct nlk_neuralnet_t *nn, const struct nlk_corpus_t *corpus,
const unsigned int epochs, const bool verbose)
{
if(verbose) {
nlk_tic("Generating paragraph vectors", false);
printf(" (%u iterations)\n", epochs);
}
/* create new paragraph table */
struct nlk_layer_lookup_t *paragraphs;
paragraphs = nlk_layer_lookup_create(corpus->len, nn->words->weights->cols);
nlk_layer_lookup_init(paragraphs);
/* unpack options */
unsigned int ctx_size = nn->context_opts.max_size;
unsigned int layer_size2 = 0;
if(nn->train_opts.hs) {
layer_size2 = nn->hs->weights->cols;
} else if(nn->train_opts.negative) {
layer_size2 = nn->neg->weights->cols;
}
/* lines shortcut */
struct nlk_line_t *lines = corpus->lines;
/* prevent weights from changing for words and hs/neg */
nlk_pv_inference_mode(nn);
/* progress */
size_t generated = 0;
const size_t total = corpus->len;
/** @section Parallel Generation of PVs
*/
#pragma omp parallel shared(generated)
{
struct nlk_line_t *line = NULL;
/* for converting a sentence to a series of training contexts */
struct nlk_context_t **contexts = NULL;
contexts = nlk_context_create_array(ctx_size);
/* for undersampling words in a line */
struct nlk_line_t *line_sample = nlk_line_create(NLK_MAX_LINE_SIZE);
/* output of the first layer */
NLK_ARRAY *layer1_out = nlk_array_create(layer_size2, 1);
/* for storing gradients */
NLK_ARRAY *grad_acc = nlk_array_create(1, layer_size2);
/* variables for handling splitting the corpus among threads */
int num_threads = omp_get_num_threads();
size_t line_cur;
size_t end_line;
#pragma omp for
for(int thread_id = 0; thread_id < num_threads; thread_id++) {
/* get the thread's part of the corpus */
line_cur = nlk_text_get_split_start_line(total, num_threads,
thread_id);
end_line = nlk_text_get_split_end_line(total, num_threads,
thread_id);
/* for each line, generate its PV */
while(line_cur < end_line) {
/* get the next line */
line = &lines[line_cur];
/* generate the paragraph vector */
nlk_pv_gen_line(nn, line, epochs, paragraphs, line_sample,
contexts, grad_acc, layer1_out);
/* go to next line */
line_cur++;
generated++;
/* display progress */
if(verbose) {
nlk_pv_display(generated, total);
}
} /* end of PV */
} /* end of all PVs (thread loop) */
if(verbose) {
nlk_pv_display(generated, total);
}
/* free thread memory */
nlk_context_free_array(contexts);
nlk_line_free(line_sample);
nlk_array_free(layer1_out);
nlk_array_free(grad_acc);
} /* end of parallel section */
if(verbose) {
printf("\n");
}
nlk_pv_learn_mode(nn);
return paragraphs;
}
/**
* Infer Paragraph Vector for a string
*
* @param nn the neural network
* @param str the string
* @param epochs epochs (iterations) to use in generating the PVs
*
* @return a paragraph table (lookup layer) with the PV generated (id = 0)
*
* @warning terribly inefficient function that allocates and frees all memory
* each time it is called.
*/
struct nlk_layer_lookup_t *
nlk_pv_gen_string(struct nlk_neuralnet_t *nn, char *str,
const unsigned int epochs)
{
/** @section Initialization
*/
struct nlk_layer_lookup_t *paragraphs = NULL;
paragraphs = nlk_layer_lookup_create(1, nn->words->weights->cols);
nlk_layer_lookup_init(paragraphs);
/* unpack options */
unsigned int ctx_size = nn->context_opts.max_size;
unsigned int layer_size2 = 0;
if(nn->train_opts.hs) {
layer_size2 = nn->hs->weights->cols;
} else if(nn->train_opts.negative) {
layer_size2 = nn->neg->weights->cols;
}
/* representation of the line as an array of pointer to strings */
char **tline = nlk_text_line_create();
struct nlk_line_t *line = nlk_line_create(NLK_MAX_LINE_SIZE);
line->line_id = 0;
/* for converting a sentence to a series of training contexts */
struct nlk_context_t **contexts = NULL;
contexts = nlk_context_create_array(ctx_size);
/* for undersampling words in a line */
struct nlk_line_t *line_sample = nlk_line_create(NLK_MAX_LINE_SIZE);
/* output of the first layer */
NLK_ARRAY *layer1_out = nlk_array_create(layer_size2, 1);
/* for storing gradients */
NLK_ARRAY *grad_acc = nlk_array_create(1, layer_size2);
/** @section Vocabularize & Generate Paragraph Vector
*/
const size_t len = strlen(str);
/* 1 - convert to **text_line */
nlk_text_line_read(str, len, tline);
/* 2 - vocabularize */
line->len = nlk_vocab_vocabularize(&nn->vocab, tline, NULL, line->varray);
/* 3 - generate the paragraph vector */
nlk_pv_gen_line(nn, line, epochs, paragraphs, line_sample,
contexts, grad_acc, layer1_out);
/**@section Free memory
*/
nlk_text_line_free(tline);
nlk_line_free(line);
nlk_context_free_array(contexts);
nlk_line_free(line_sample);
nlk_array_free(layer1_out);
nlk_array_free(grad_acc);
return paragraphs;
}
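/*
 * Illustrative usage sketch (not part of the library): assuming `nn` points
 * to a paragraph-vector model that was trained or loaded elsewhere, the PV
 * of an arbitrary string can be inferred with e.g.
 *
 *      char text[] = "an example sentence";
 *      struct nlk_layer_lookup_t *par = nlk_pv_gen_string(nn, text, 50);
 *
 * The returned lookup layer holds the single generated paragraph vector at
 * id 0 and is owned by the caller; 50 is just an example epoch count.
 */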
|
SPHCalcDensityFunctor.h | /**
* @file SPHCalcDensityFunctor.h
* @author seckler
* @date 19.01.18
*/
#pragma once
#include "autopas/pairwiseFunctors/Functor.h"
#include "autopas/particles/OwnershipState.h"
#include "autopas/sph/SPHKernels.h"
namespace autopas::sph {
/**
* Class that defines the density functor.
* It is used to calculate the density based on the given SPH kernel.
* @tparam Particle
* @tparam ParticleCell
*/
template <class Particle>
class SPHCalcDensityFunctor : public Functor<Particle, SPHCalcDensityFunctor<Particle>> {
public:
/// soa arrays type
using SoAArraysType = typename Particle::SoAArraysType;
SPHCalcDensityFunctor() : autopas::Functor<Particle, SPHCalcDensityFunctor<Particle>>(0.){};
bool isRelevantForTuning() override { return true; }
bool allowsNewton3() override { return true; }
bool allowsNonNewton3() override { return true; }
[[nodiscard]] bool isAppropriateClusterSize(unsigned int clusterSize,
DataLayoutOption::Value dataLayout) const override {
return dataLayout == DataLayoutOption::aos; // This functor only supports clusters via AoS.
}
/**
* Calculates the density contribution of the interaction of particle i and j.
* It is not symmetric, because the smoothing lengths of the two particles can
* be different.
* @param i first particle of the interaction
* @param j second particle of the interaction
* @param newton3 defines whether or not to use Newton 3
*/
inline void AoSFunctor(Particle &i, Particle &j, bool newton3 = true) override {
if (i.isDummy() or j.isDummy()) {
return;
}
const std::array<double, 3> dr = utils::ArrayMath::sub(j.getR(), i.getR()); // ep_j[j].pos - ep_i[i].pos;
const double density =
j.getMass() * SPHKernels::W(dr, i.getSmoothingLength()); // ep_j[j].mass * W(dr, ep_i[i].smth)
i.addDensity(density);
if (newton3) {
// Newton 3:
// W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
const double density2 = i.getMass() * SPHKernels::W(dr, j.getSmoothingLength());
j.addDensity(density2);
}
}
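// In SPH terms, AoSFunctor accumulates the standard density summation
// rho_i = sum_j m_j * W(|r_j - r_i|, h_i); with newton3 enabled the
// mirrored contribution m_i * W(|r_j - r_i|, h_j) is added to particle j.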
/**
* Get the number of floating point operations used in one full kernel call
* @return the number of floating point operations
*/
static unsigned long getNumFlopsPerKernelCall() {
unsigned long flops = 0;
flops += 3; // calculating dr
flops += 2 * SPHKernels::getFlopsW(); // flops for calling W
flops += 2 * 1; // calculating density
flops += 2 * 1; // adding density
return flops;
}
/**
* @copydoc Functor::SoAFunctorSingle(SoAView<SoAArraysType>, bool)
* This functor ignores the newton3 value, as we do not expect any benefit from disabling newton3.
*/
void SoAFunctorSingle(SoAView<SoAArraysType> soa, bool newton3) override {
if (soa.getNumParticles() == 0) return;
double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();
size_t numParticles = soa.getNumParticles();
for (unsigned int i = 0; i < numParticles; ++i) {
// checks whether particle i is owned.
if (ownedStatePtr[i] == OwnershipState::dummy) {
continue;
}
double densacc = 0.;
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
for (unsigned int j = i + 1; j < numParticles; ++j) {
const double drx = xptr[i] - xptr[j];
const double dry = yptr[i] - yptr[j];
const double drz = zptr[i] - zptr[j];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
// if second particle is a dummy, we skip the interaction.
const bool mask = ownedStatePtr[j] != OwnershipState::dummy;
const double density = mask ? massptr[j] * SPHKernels::W(dr2, smthptr[i]) : 0.;
densacc += density;
// Newton 3:
// W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
const double density2 = mask ? massptr[i] * SPHKernels::W(dr2, smthptr[j]) : 0.;
densityptr[j] += density2;
}
densityptr[i] += densacc;
}
}
/**
* @copydoc Functor::SoAFunctorPair(SoAView<SoAArraysType>, SoAView<SoAArraysType>, bool)
*/
void SoAFunctorPair(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool newton3) override {
if (soa1.getNumParticles() == 0 || soa2.getNumParticles() == 0) return;
double *const __restrict xptr1 = soa1.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr1 = soa1.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr1 = soa1.template begin<Particle::AttributeNames::posZ>();
double *const __restrict densityptr1 = soa1.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr1 = soa1.template begin<Particle::AttributeNames::smth>();
double *const __restrict massptr1 = soa1.template begin<Particle::AttributeNames::mass>();
double *const __restrict xptr2 = soa2.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr2 = soa2.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr2 = soa2.template begin<Particle::AttributeNames::posZ>();
double *const __restrict densityptr2 = soa2.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr2 = soa2.template begin<Particle::AttributeNames::smth>();
double *const __restrict massptr2 = soa2.template begin<Particle::AttributeNames::mass>();
const auto *const __restrict ownedStatePtr1 = soa1.template begin<Particle::AttributeNames::ownershipState>();
const auto *const __restrict ownedStatePtr2 = soa2.template begin<Particle::AttributeNames::ownershipState>();
size_t numParticlesi = soa1.getNumParticles();
for (unsigned int i = 0; i < numParticlesi; ++i) {
// skip the interaction if particle i is a dummy.
if (ownedStatePtr1[i] == OwnershipState::dummy) {
continue;
}
double densacc = 0.;
size_t numParticlesj = soa2.getNumParticles();
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
for (unsigned int j = 0; j < numParticlesj; ++j) {
const double drx = xptr1[i] - xptr2[j];
const double dry = yptr1[i] - yptr2[j];
const double drz = zptr1[i] - zptr2[j];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
// if second particle is a dummy, we skip the interaction.
const bool mask = ownedStatePtr2[j] != OwnershipState::dummy;
const double density = mask ? massptr2[j] * SPHKernels::W(dr2, smthptr1[i]) : 0.;
densacc += density;
if (newton3) {
// Newton 3:
// W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
const double density2 = mask ? massptr1[i] * SPHKernels::W(dr2, smthptr2[j]) : 0.;
densityptr2[j] += density2;
}
}
densityptr1[i] += densacc;
}
}
// clang-format off
/**
* @copydoc Functor::SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst, const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList, bool newton3)
*/
// clang-format on
void SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst,
const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList,
bool newton3) override {
if (soa.getNumParticles() == 0) return;
const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();
// checks whether particle i is owned.
if (ownedStatePtr[indexFirst] == OwnershipState::dummy) {
return;
}
double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
double densacc = 0;
const auto &currentList = neighborList;
size_t listSize = currentList.size();
// icpc vectorizes this.
// g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : densacc)
for (unsigned int j = 0; j < listSize; ++j) {
const double drx = xptr[indexFirst] - xptr[currentList[j]];
const double dry = yptr[indexFirst] - yptr[currentList[j]];
const double drz = zptr[indexFirst] - zptr[currentList[j]];
const double drx2 = drx * drx;
const double dry2 = dry * dry;
const double drz2 = drz * drz;
const double dr2 = drx2 + dry2 + drz2;
// if second particle is a dummy, we skip the interaction.
const bool mask = ownedStatePtr[currentList[j]] != OwnershipState::dummy;
const double density = mask ? massptr[currentList[j]] * SPHKernels::W(dr2, smthptr[indexFirst]) : 0.;
densacc += density;
if (newton3) {
// Newton 3:
// W is symmetric in dr, so no -dr needed, i.e. we can reuse dr
const double density2 = mask ? massptr[indexFirst] * SPHKernels::W(dr2, smthptr[currentList[j]]) : 0.;
densityptr[currentList[j]] += density2;
}
}
densityptr[indexFirst] += densacc;
}
/**
* @copydoc Functor::getNeededAttr()
*/
constexpr static auto getNeededAttr() {
return std::array<typename Particle::AttributeNames, 7>{
Particle::AttributeNames::mass, Particle::AttributeNames::posX, Particle::AttributeNames::posY,
Particle::AttributeNames::posZ, Particle::AttributeNames::smth, Particle::AttributeNames::density,
Particle::AttributeNames::ownershipState};
}
/**
* @copydoc Functor::getNeededAttr(std::false_type)
*/
constexpr static auto getNeededAttr(std::false_type) {
return std::array<typename Particle::AttributeNames, 6>{
Particle::AttributeNames::mass, Particle::AttributeNames::posX, Particle::AttributeNames::posY,
Particle::AttributeNames::posZ, Particle::AttributeNames::smth, Particle::AttributeNames::ownershipState};
}
/**
* @copydoc Functor::getComputedAttr()
*/
constexpr static auto getComputedAttr() {
return std::array<typename Particle::AttributeNames, 1>{Particle::AttributeNames::density};
}
};
} // namespace autopas::sph
|
micro-app-soa-openmp.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <getopt.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
#define NPOINTS 10000
#define NEDGES 10000
struct graph {
int v0[NEDGES];
int v1[NEDGES];
float v0_data[NEDGES][3];
float v1_data[NEDGES][3];
float data[NEDGES];
};
float pt_data[NPOINTS][3];
float edge_data[NEDGES];
struct graph gr;
void print_help() {
printf("Usage: \n");
printf("\t --help print this message and exit \n");
printf("\t --type Type of graph, must be one of:\n");
printf("\t\t\t pure_random \n");
printf("\t\t\t regular_random \n");
printf("\t\t\t contiguous \n");
printf("\t --nloops Number of repetitions, must be \n");
printf("\t at least one. \n");
printf("\t --file File from which to read graph \n");
}
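/* Example invocation (illustrative; the executable name may differ):
 *   ./micro-app-soa-openmp --type regular_random --nloops 100 --file graph.dat
 * --type and --nloops are required, --file is optional. */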
double timer() {
struct timeval tp;
struct timezone tzp;
long i;
i = gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec) + ((double) tp.tv_usec) * 1e-6;
}
int graph_init(char* graph_type, char* fname) {
lua_State *L;
int i, j, k, v;
L = luaL_newstate();
luaL_openlibs(L);
luaL_loadfile(L, "graph.lua");
lua_pcall(L, 0, 0, 0);
lua_getglobal(L, "create_graph");
lua_pushstring(L, graph_type);
lua_pushinteger(L, NPOINTS);
lua_pushinteger(L, NEDGES);
if (fname != NULL) {
lua_pushstring(L, fname);
lua_call(L, 4, 1);
} else {
lua_call(L, 3, 1);
}
/* Table is now sitting at the top of the stack */
i = 0;
lua_pushnil(L); /* Make sure lua_next starts at beginning */
while (lua_next(L, -2) != 0) {
// fetch first key
k = lua_tointeger(L, -2);
lua_pushnil(L);
while (lua_next(L, -2) != 0) { // loop over neighbors
lua_pop(L,1);
v = lua_tointeger(L, -1);
// build the graph
gr.v0[i] = k - 1;
gr.v1[i] = v - 1;
for (j = 0; j < 3; j++) {
gr.v0_data[i][j] = 0;
gr.v1_data[i][j] = 0;
}
i++;
}
lua_pop(L,1);
}
lua_close(L);
if (i == 0) {
return -1;
} else {
return 0;
}
}
int data_init() {
int i;
for (i = 0; i < NPOINTS; i++) {
pt_data[i][0] = 1;
pt_data[i][1] = 1;
pt_data[i][2] = 1;
}
return 0;
}
int edge_data_init() {
int i;
for (i = 0; i < NEDGES; i++) {
edge_data[i] = 1;
}
return 0;
}
int edge_gather() {
int i,j;
int v0;
int v1;
#pragma omp parallel for \
private(i, j, v0, v1)
for (i = 0; i < NEDGES; i++) {
v0 = gr.v0[i];
v1 = gr.v1[i];
gr.v0_data[i][0] = pt_data[v0][0];
gr.v0_data[i][1] = pt_data[v0][1];
gr.v0_data[i][2] = pt_data[v0][2];
gr.v1_data[i][0] = pt_data[v1][0];
gr.v1_data[i][1] = pt_data[v1][1];
gr.v1_data[i][2] = pt_data[v1][2];
gr.data[i] = edge_data[i];
}
return 0;
}
int edge_compute() {
int i, j;
float v0_p0, v0_p1, v0_p2;
float v1_p0, v1_p1, v1_p2;
float x0, x1, x2;
float e_data;
#pragma omp parallel for \
private(i, j, v0_p0, v0_p1, v0_p2) \
private(v1_p0, v1_p1, v1_p2) \
private(x0, x1, x2, e_data)
for (i = 0; i < NEDGES; i++) {
v0_p0 = gr.v0_data[i][0];
v0_p1 = gr.v0_data[i][1];
v0_p2 = gr.v0_data[i][2];
v1_p0 = gr.v1_data[i][0];
v1_p1 = gr.v1_data[i][1];
v1_p2 = gr.v1_data[i][2];
e_data = gr.data[i];
x0 = (v0_p0 + v1_p0) * e_data;
x1 = (v0_p1 + v1_p1) * e_data;
x2 = (v0_p2 + v1_p2) * e_data;
gr.v0_data[i][0] = x0;
gr.v0_data[i][1] = x1;
gr.v0_data[i][2] = x2;
gr.v1_data[i][0] = x0;
gr.v1_data[i][1] = x1;
gr.v1_data[i][2] = x2;
}
return 0;
}
int edge_scatter() {
int i;
int v0;
int v1;
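/* Scatter the edge results back onto the endpoint vertices; a vertex is
   typically shared by many edges, so the accumulations into pt_data below
   are done with atomics. */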
#pragma omp parallel for \
private(i, v0, v1)
for (i = 0; i < NEDGES; i++) {
v0 = gr.v0[i];
v1 = gr.v1[i];
#pragma omp atomic
pt_data[v0][0] += gr.v0_data[i][0];
#pragma omp atomic
pt_data[v0][1] += gr.v0_data[i][1];
#pragma omp atomic
pt_data[v0][2] += gr.v0_data[i][2];
#pragma omp atomic
pt_data[v1][0] += gr.v1_data[i][0];
#pragma omp atomic
pt_data[v1][1] += gr.v1_data[i][1];
#pragma omp atomic
pt_data[v1][2] += gr.v1_data[i][2];
}
return 0;
}
int main(int argc, char** argv) {
int i;
int rv;
double time0, time1;
int c, opt_i;
int nloops = 0;
char* gt = NULL;     /* required: --type */
char* fname = NULL;  /* optional: --file */
static struct option long_opts[] = {
{"help", no_argument, 0, 0},
{"type", required_argument, 0, 0},
{"nloops", required_argument, 0, 0},
{"file", required_argument, 0, 0}
};
/* Parse command-line arguments */
while (1) {
c = getopt_long(argc, argv, "",
long_opts, &opt_i);
if (c == -1) {
break;
}
if (c == 0) {
switch (opt_i) {
case 0:
print_help();
exit(0);
case 1:
gt = optarg;
break;
case 2:
nloops = atoi(optarg);
break;
case 3:
fname = optarg;
break;
}
} else {
print_help();
exit(0);
}
}
/* check for errors */
if (gt == NULL || nloops < 1) {
print_help();
exit(0);
}
// initialize data structures
rv = graph_init(gt, fname);
if (rv < 0) {
printf("Error creating graph. \n");
exit(0);
}
// initialize data structures
data_init();
edge_data_init();
// loop
time0 = timer();
for (i = 0; i < nloops; i++) {
edge_gather();
edge_compute();
edge_scatter();
}
time1 = timer();
// print results
for (i = 0; i < 10; i++) {
printf("%i : %f %f %f \n", i, pt_data[i][0], pt_data[i][1], pt_data[i][2]);
}
printf("Time: %f s \n", (time1 - time0) / ((float) nloops));
return 0;
}
|
lu.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU
This benchmark is an OpenMP C version of the NPB LU code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. The 3.0 translation was performed by UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: S. Weeratunga
V. Venkatakrishnan
E. Barszcz
M. Yarrow
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
#include "../paging_benchmark.h"
#include <nautilus/nautilus.h>
#include <nautilus/shell.h>
#include "../math/nas_math.h"
/* global variables */
#include "applu.h"
#if defined(_OPENMP)
/* for thread synchronization */
static boolean flag[ISIZ1/2*2+1];
#endif /* _OPENMP */
/* function declarations */
static void blts (int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void buts(int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void domain(void);
static void erhs(void);
static void error(void);
static void exact( int i, int j, int k, double u000ijk[5] );
static void jacld(int k);
static void jacu(int k);
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]);
static void pintgr(void);
static void read_input(void);
static void rhs(void);
static void setbv(void);
static void setcoeff(void);
static void setiv(void);
static void ssor(void);
static void verify(double xcr[5], double xce[5], double xci,
char *class, boolean *verified);
/*--------------------------------------------------------------------
program applu
--------------------------------------------------------------------*/
static int program_LU(char *_buf, void* _priv);
static struct shell_cmd_impl nas_lu_impl = {
.cmd = "nas-lu",
.help_str = "NAS parallel benchmark LU",
.handler = program_LU,
};
nk_register_shell_cmd(nas_lu_impl);
#ifdef NAUT_CONFIG_ASPACE_PAGING
int program_LU_paging(char * _buf, void *_priv){
return paging_wrapper(_buf, _priv, &program_LU);
}
static struct shell_cmd_impl nas_lu_paging_impl = {
.cmd = "nas-lu-paging",
.help_str = "NAS parallel benchmark LU with paging",
.handler = program_LU_paging,
};
nk_register_shell_cmd(nas_lu_paging_impl);
#endif
int program_LU(char * _buf, void *_priv) {
/*--------------------------------------------------------------------
c
c driver for the performance evaluation of the solver for
c five coupled parabolic/elliptic partial differential equations.
c
--------------------------------------------------------------------*/
char class;
boolean verified;
double mflops;
int nthreads = 1;
/*--------------------------------------------------------------------
c read input data
--------------------------------------------------------------------*/
read_input();
/*--------------------------------------------------------------------
c set up domain sizes
--------------------------------------------------------------------*/
domain();
/*--------------------------------------------------------------------
c set up coefficients
--------------------------------------------------------------------*/
setcoeff();
/*--------------------------------------------------------------------
c set the boundary values for dependent variables
--------------------------------------------------------------------*/
setbv();
/*--------------------------------------------------------------------
c set the initial values for dependent variables
--------------------------------------------------------------------*/
setiv();
/*--------------------------------------------------------------------
c compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
erhs();
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
}
/*--------------------------------------------------------------------
c perform the SSOR iterations
--------------------------------------------------------------------*/
ssor();
/*--------------------------------------------------------------------
c compute the solution error
--------------------------------------------------------------------*/
error();
/*--------------------------------------------------------------------
c compute the surface integral
--------------------------------------------------------------------*/
pintgr();
/*--------------------------------------------------------------------
c verification test
--------------------------------------------------------------------*/
verify ( rsdnm, errnm, frc, &class, &verified );
mflops = (double)itmax*(1984.77*(double)nx0
*(double)ny0
*(double)nz0
-10923.3*pow2((double)( nx0+ny0+nz0 )/3.0)
+27770.9* (double)( nx0+ny0+nz0 )/3.0
-144010.0)
/ (maxtime*1000000.0);
c_print_results("LU", class, nx0,
ny0, nz0, itmax, nthreads,
maxtime, mflops, " floating point", verified,
NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6,
"(none)");
return 0;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void blts (int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block lower triangular solution:
c
c v <-- ( L-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]
+ ldz[i][j][m][1] * v[i][j][k-1][1]
+ ldz[i][j][m][2] * v[i][j][k-1][2]
+ ldz[i][j][m][3] * v[i][j][k-1][3]
+ ldz[i][j][m][4] * v[i][j][k-1][4] );
}
}
}
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
#if defined(_OPENMP)
if (i != ist) {
while (flag[i-1] == 0) {
#pragma omp flush(flag)
;
}
}
if (i != iend) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
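/*--------------------------------------------------------------------
c the flag[] handshaking above implements a pipelined wavefront:
c before updating row i, wait until row i-1 has been completed
c (flag[i-1] set) and until the signal this row will post has been
c consumed by the next row (flag[i] cleared)
--------------------------------------------------------------------*/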
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldy[i][j][m][0] * v[i][j-1][k][0]
+ ldx[i][j][m][0] * v[i-1][j][k][0]
+ ldy[i][j][m][1] * v[i][j-1][k][1]
+ ldx[i][j][m][1] * v[i-1][j][k][1]
+ ldy[i][j][m][2] * v[i][j-1][k][2]
+ ldx[i][j][m][2] * v[i-1][j][k][2]
+ ldy[i][j][m][3] * v[i][j-1][k][3]
+ ldx[i][j][m][3] * v[i-1][j][k][3]
+ ldy[i][j][m][4] * v[i][j-1][k][4]
+ ldx[i][j][m][4] * v[i-1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
c
c forward elimination
--------------------------------------------------------------------*/
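/*--------------------------------------------------------------------
c (fully unrolled 5x5 Gaussian elimination without pivoting on tmat;
c the same row operations are applied to the right-hand side
c v[i][j][k][*], followed by the back substitution below)
--------------------------------------------------------------------*/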
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
v[i][j][k][1] = v[i][j][k][1]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][0] * tmp;
tmp1 = 1.0 / tmat[1][1];
tmp = tmp1 * tmat[2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][1] * tmp;
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][2] * tmp;
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
v[i][j][k][4] = v[i][j][k][4]
/ tmat[4][4];
v[i][j][k][3] = v[i][j][k][3]
- tmat[3][4] * v[i][j][k][4];
v[i][j][k][3] = v[i][j][k][3]
/ tmat[3][3];
v[i][j][k][2] = v[i][j][k][2]
- tmat[2][3] * v[i][j][k][3]
- tmat[2][4] * v[i][j][k][4];
v[i][j][k][2] = v[i][j][k][2]
/ tmat[2][2];
v[i][j][k][1] = v[i][j][k][1]
- tmat[1][2] * v[i][j][k][2]
- tmat[1][3] * v[i][j][k][3]
- tmat[1][4] * v[i][j][k][4];
v[i][j][k][1] = v[i][j][k][1]
/ tmat[1][1];
v[i][j][k][0] = v[i][j][k][0]
- tmat[0][1] * v[i][j][k][1]
- tmat[0][2] * v[i][j][k][2]
- tmat[0][3] * v[i][j][k][3]
- tmat[0][4] * v[i][j][k][4];
v[i][j][k][0] = v[i][j][k][0]
/ tmat[0][0];
}
#if defined(_OPENMP)
if (i != ist) flag[i-1] = 0;
if (i != iend) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void buts(int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block upper triangular solution:
c
c v <-- ( U-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
for (m = 0; m < 5; m++) {
tv[i][j][m] =
omega * ( udz[i][j][m][0] * v[i][j][k+1][0]
+ udz[i][j][m][1] * v[i][j][k+1][1]
+ udz[i][j][m][2] * v[i][j][k+1][2]
+ udz[i][j][m][3] * v[i][j][k+1][3]
+ udz[i][j][m][4] * v[i][j][k+1][4] );
}
}
}
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
#if defined(_OPENMP)
if (i != iend) {
while (flag[i+1] == 0) {
#pragma omp flush(flag)
;
}
}
if (i != ist) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
for (j = jend; j >= jst; j--) {
for (m = 0; m < 5; m++) {
tv[i][j][m] = tv[i][j][m]
+ omega * ( udy[i][j][m][0] * v[i][j+1][k][0]
+ udx[i][j][m][0] * v[i+1][j][k][0]
+ udy[i][j][m][1] * v[i][j+1][k][1]
+ udx[i][j][m][1] * v[i+1][j][k][1]
+ udy[i][j][m][2] * v[i][j+1][k][2]
+ udx[i][j][m][2] * v[i+1][j][k][2]
+ udy[i][j][m][3] * v[i][j+1][k][3]
+ udx[i][j][m][3] * v[i+1][j][k][3]
+ udy[i][j][m][4] * v[i][j+1][k][4]
+ udx[i][j][m][4] * v[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
tv[i][j][1] = tv[i][j][1]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][0] * tmp;
tmp1 = 1.0 / tmat[1][1];
tmp = tmp1 * tmat[2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][1] * tmp;
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][2] * tmp;
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
tv[i][j][4] = tv[i][j][4]
/ tmat[4][4];
tv[i][j][3] = tv[i][j][3]
- tmat[3][4] * tv[i][j][4];
tv[i][j][3] = tv[i][j][3]
/ tmat[3][3];
tv[i][j][2] = tv[i][j][2]
- tmat[2][3] * tv[i][j][3]
- tmat[2][4] * tv[i][j][4];
tv[i][j][2] = tv[i][j][2]
/ tmat[2][2];
tv[i][j][1] = tv[i][j][1]
- tmat[1][2] * tv[i][j][2]
- tmat[1][3] * tv[i][j][3]
- tmat[1][4] * tv[i][j][4];
tv[i][j][1] = tv[i][j][1]
/ tmat[1][1];
tv[i][j][0] = tv[i][j][0]
- tmat[0][1] * tv[i][j][1]
- tmat[0][2] * tv[i][j][2]
- tmat[0][3] * tv[i][j][3]
- tmat[0][4] * tv[i][j][4];
tv[i][j][0] = tv[i][j][0]
/ tmat[0][0];
v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
}
#if defined(_OPENMP)
if (i != iend) flag[i+1] = 0;
if (i != ist) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void domain(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
nx = nx0;
ny = ny0;
nz = nz0;
/*--------------------------------------------------------------------
c check the sub-domain size
--------------------------------------------------------------------*/
if ( nx < 4 || ny < 4 || nz < 4 ) {
printf(" SUBDOMAIN SIZE IS TOO SMALL - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n"
" TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
exit(1);
}
if ( nx > ISIZ1 || ny > ISIZ2 || nz > ISIZ3 ) {
printf(" SUBDOMAIN SIZE IS TOO LARGE - \n"
" ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
" SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n"
" ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n"
" CURRENTLY%4d%4d%4d\n", nx, ny, nz);
exit(1);
}
/*--------------------------------------------------------------------
c set up the start and end in i and j extents for all processors
--------------------------------------------------------------------*/
ist = 1;
iend = nx - 2;
jst = 1;
jend = ny - 2;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void erhs(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
c
c compute the right hand side based on exact solution
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double dsspm;
double xi, eta, zeta;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
dsspm = dssp;
#pragma omp for
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
for (k = 0; k < nz; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = 0.0;
}
}
}
}
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
xi = ( (double)(iglob) ) / ( nx0 - 1 );
for (j = 0; j < ny; j++) {
jglob = j;
eta = ( (double)(jglob) ) / ( ny0 - 1 );
for (k = 0; k < nz; k++) {
zeta = ( (double)(k) ) / ( nz - 1 );
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp for
for (i = L1; i <= L2; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k < nz - 1; k++) {
flux[i][j][k][0] = rsd[i][j][k][1];
u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][2] = rsd[i][j][k][2] * u21;
flux[i][j][k][3] = rsd[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp for
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (i = ist; i <= iend; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
for (i = ist; i <= L2; i++) {
tmp = 1.0 / rsd[i][j][k][0];
u21i = tmp * rsd[i][j][k][1];
u31i = tmp * rsd[i][j][k][2];
u41i = tmp * rsd[i][j][k][3];
u51i = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i-1][j][k][0];
u21im1 = tmp * rsd[i-1][j][k][1];
u31im1 = tmp * rsd[i-1][j][k][2];
u41im1 = tmp * rsd[i-1][j][k][3];
u51im1 = tmp * rsd[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 *
( u21i - u21im1 );
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )
- ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )
+ (1.0/6.0)
* tx3 * ( u21i*u21i - u21im1*u21im1 )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
for (i = ist; i <= iend; i++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dx1 * tx1 * ( rsd[i-1][j][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i+1][j][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( rsd[i-1][j][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i+1][j][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( rsd[i-1][j][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i+1][j][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( rsd[i-1][j][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i+1][j][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( rsd[i-1][j][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[1][j][k][m] = frct[1][j][k][m]
- dsspm * ( + 5.0 * rsd[1][j][k][m]
- 4.0 * rsd[2][j][k][m]
+ rsd[3][j][k][m] );
frct[2][j][k][m] = frct[2][j][k][m]
- dsspm * ( - 4.0 * rsd[1][j][k][m]
+ 6.0 * rsd[2][j][k][m]
- 4.0 * rsd[3][j][k][m]
+ rsd[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <=iend1; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i-2][j][k][m]
- 4.0 * rsd[i-1][j][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i+1][j][k][m]
+ rsd[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[nx-3][j][k][m] = frct[nx-3][j][k][m]
- dsspm * ( rsd[nx-5][j][k][m]
- 4.0 * rsd[nx-4][j][k][m]
+ 6.0 * rsd[nx-3][j][k][m]
- 4.0 * rsd[nx-2][j][k][m] );
frct[nx-2][j][k][m] = frct[nx-2][j][k][m]
- dsspm * ( rsd[nx-4][j][k][m]
- 4.0 * rsd[nx-3][j][k][m]
+ 5.0 * rsd[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = L1; j <= L2; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = rsd[i][j][k][2];
u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u31;
flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][3] = rsd[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp for
for (i = ist; i <= iend; i++) {
for (k = 1; k <= nz - 2; k++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
for (j = jst; j <= L2; j++) {
tmp = 1.0 / rsd[i][j][k][0];
u21j = tmp * rsd[i][j][k][1];
u31j = tmp * rsd[i][j][k][2];
u41j = tmp * rsd[i][j][k][3];
u51j = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j-1][k][0];
u21jm1 = tmp * rsd[i][j-1][k][1];
u31jm1 = tmp * rsd[i][j-1][k][2];
u41jm1 = tmp * rsd[i][j-1][k][3];
u51jm1 = tmp * rsd[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 *
( u31j - u31jm1 );
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )
- ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )
+ (1.0/6.0)
* ty3 * ( u31j*u31j - u31jm1*u31jm1 )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j <= jend; j++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dy1 * ty1 * ( rsd[i][j-1][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j+1][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( rsd[i][j-1][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j+1][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( rsd[i][j-1][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j+1][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( rsd[i][j-1][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j+1][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( rsd[i][j-1][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][1][k][m] = frct[i][1][k][m]
- dsspm * ( + 5.0 * rsd[i][1][k][m]
- 4.0 * rsd[i][2][k][m]
+ rsd[i][3][k][m] );
frct[i][2][k][m] = frct[i][2][k][m]
- dsspm * ( - 4.0 * rsd[i][1][k][m]
+ 6.0 * rsd[i][2][k][m]
- 4.0 * rsd[i][3][k][m]
+ rsd[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j-2][k][m]
- 4.0 * rsd[i][j-1][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j+1][k][m]
+ rsd[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][ny-3][k][m] = frct[i][ny-3][k][m]
- dsspm * ( rsd[i][ny-5][k][m]
- 4.0 * rsd[i][ny-4][k][m]
+ 6.0 * rsd[i][ny-3][k][m]
- 4.0 * rsd[i][ny-2][k][m] );
frct[i][ny-2][k][m] = frct[i][ny-2][k][m]
- dsspm * ( rsd[i][ny-4][k][m]
- 4.0 * rsd[i][ny-3][k][m]
+ 5.0 * rsd[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = rsd[i][j][k][3];
u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u41;
flux[i][j][k][2] = rsd[i][j][k][2] * u41;
flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;
}
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / rsd[i][j][k][0];
u21k = tmp * rsd[i][j][k][1];
u31k = tmp * rsd[i][j][k][2];
u41k = tmp * rsd[i][j][k][3];
u51k = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j][k-1][0];
u21km1 = tmp * rsd[i][j][k-1][1];
u31km1 = tmp * rsd[i][j][k-1][2];
u41km1 = tmp * rsd[i][j][k-1][3];
u51km1 = tmp * rsd[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k
- u41km1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )
- ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )
+ (1.0/6.0)
* tz3 * ( u41k*u41k - u41km1*u41km1 )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k <= nz - 2; k++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dz1 * tz1 * ( rsd[i][j][k+1][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j][k-1][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( rsd[i][j][k+1][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j][k-1][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( rsd[i][j][k+1][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j][k-1][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( rsd[i][j][k+1][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j][k-1][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( rsd[i][j][k+1][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j][k-1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][j][1][m] = frct[i][j][1][m]
- dsspm * ( + 5.0 * rsd[i][j][1][m]
- 4.0 * rsd[i][j][2][m]
+ rsd[i][j][3][m] );
frct[i][j][2][m] = frct[i][j][2][m]
- dsspm * (- 4.0 * rsd[i][j][1][m]
+ 6.0 * rsd[i][j][2][m]
- 4.0 * rsd[i][j][3][m]
+ rsd[i][j][4][m] );
}
for (k = 3; k <= nz - 4; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j][k-2][m]
- 4.0 * rsd[i][j][k-1][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j][k+1][m]
+ rsd[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][j][nz-3][m] = frct[i][j][nz-3][m]
- dsspm * ( rsd[i][j][nz-5][m]
- 4.0 * rsd[i][j][nz-4][m]
+ 6.0 * rsd[i][j][nz-3][m]
- 4.0 * rsd[i][j][nz-2][m] );
frct[i][j][nz-2][m] = frct[i][j][nz-2][m]
- dsspm * ( rsd[i][j][nz-4][m]
- 4.0 * rsd[i][j][nz-3][m]
+ 5.0 * rsd[i][j][nz-2][m] );
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error(void) {
/*--------------------------------------------------------------------
c
c compute the solution error
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
double tmp;
double u000ijk[5];
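/* errnm[m] accumulates the squared difference between the exact and the
   computed solution; the RMS over the (nx0-2)*(ny0-2)*(nz0-2) interior
   points is taken at the end */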
for (m = 0; m < 5; m++) {
errnm[m] = 0.0;
}
for (i = ist; i <= iend; i++) {
iglob = i;
for (j = jst; j <= jend; j++) {
jglob = j;
for (k = 1; k <= nz-2; k++) {
exact( iglob, jglob, k, u000ijk );
for (m = 0; m < 5; m++) {
tmp = ( u000ijk[m] - u[i][j][k][m] );
errnm[m] = errnm[m] + tmp *tmp;
}
}
}
}
for (m = 0; m < 5; m++) {
errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact( int i, int j, int k, double u000ijk[5] ) {
/*--------------------------------------------------------------------
c
c compute the exact solution at (i,j,k)
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int m;
double xi, eta, zeta;
xi = ((double)i) / (nx0 - 1);
eta = ((double)j) / (ny0 - 1);
zeta = ((double)k) / (nz - 1);
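/* each component of the exact solution is a polynomial of degree four in the
   normalized coordinates xi, eta, zeta, with coefficients ce[m][0..12] set in
   setcoeff() */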
for (m = 0; m < 5; m++) {
u000ijk[m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k) {
/*--------------------------------------------------------------------
c compute the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
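/* d holds the 5x5 diagonal block at (i,j,k); a, b and c hold the sub-diagonal
   blocks coupling to (i,j,k-1), (i,j-1,k) and (i-1,j,k) respectively */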
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
/*--------------------------------------------------------------------
c form the block diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k-1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tz1 * dz1;
a[i][j][0][1] = 0.0;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = - dt * tz2;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = - dt * tz2
* ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );
a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
a[i][j][1][2] = 0.0;
a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );
a[i][j][1][4] = 0.0;
a[i][j][2][0] = - dt * tz2
* ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );
a[i][j][2][1] = 0.0;
a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );
a[i][j][2][4] = 0.0;
a[i][j][3][0] = - dt * tz2
* ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );
a[i][j][3][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );
a[i][j][3][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );
a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
a[i][j][3][4] = - dt * tz2 * C2;
a[i][j][4][0] = - dt * tz2
* ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2
- C1 * ( u[i][j][k-1][4] * tmp1 ) )
* ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])
- ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])
- ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3])
- c1345 * tmp2 * u[i][j][k-1][4] );
a[i][j][4][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];
a[i][j][4][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];
a[i][j][4][3] = - dt * tz2
* ( C1 * ( u[i][j][k-1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k-1][1]*u[i][j][k-1][1]
+ u[i][j][k-1][2]*u[i][j][k-1][2]
+ 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];
a[i][j][4][4] = - dt * tz2
* ( C1 * ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
/*--------------------------------------------------------------------
c form the second block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j-1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = - dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = - dt * ty2
* ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );
b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = - dt * ty2
* ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );
b[i][j][2][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );
b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );
b[i][j][2][4] = - dt * ty2 * C2;
b[i][j][3][0] = - dt * ty2
* ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );
b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = - dt * ty2
* ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2
- C1 * ( u[i][j-1][k][4] * tmp1 ) )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))
- ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))
- ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))
- c1345*tmp2*u[i][j-1][k][4] );
b[i][j][4][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];
b[i][j][4][2] = - dt * ty2
* ( C1 * ( u[i][j-1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j-1][k][1]*u[i][j-1][k][1]
+ 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]
+ u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2];
b[i][j][4][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];
b[i][j][4][4] = - dt * ty2
* ( C1 * ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block sub-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i-1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tx1 * dx1;
c[i][j][0][1] = - dt * tx2;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = 0.0;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );
c[i][j][1][1] = - dt * tx2
* ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
c[i][j][1][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );
c[i][j][1][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );
c[i][j][1][4] = - dt * tx2 * C2;
c[i][j][2][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );
c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );
c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
c[i][j][2][3] = 0.0;
c[i][j][2][4] = 0.0;
c[i][j][3][0] = - dt * tx2
* ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );
c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );
c[i][j][3][2] = 0.0;
c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
c[i][j][3][4] = 0.0;
c[i][j][4][0] = - dt * tx2
* ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2
- C1 * ( u[i-1][j][k][4] * tmp1 ) )
* ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )
- c1345 * tmp2 * u[i-1][j][k][4] );
c[i][j][4][1] = - dt * tx2
* ( C1 * ( u[i-1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]
+ u[i-1][j][k][2]*u[i-1][j][k][2]
+ u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];
c[i][j][4][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];
c[i][j][4][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];
c[i][j][4][4] = - dt * tx2
* ( C1 * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacu(int k) {
/*--------------------------------------------------------------------
c compute the upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
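/* d holds the 5x5 diagonal block at (i,j,k); a, b and c hold the
   super-diagonal blocks coupling to (i+1,j,k), (i,j+1,k) and (i,j,k+1)
   respectively */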
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
#pragma omp for nowait schedule(static)
#if defined(_OPENMP)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
#else
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
#endif
/*--------------------------------------------------------------------
c form the block diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c form the first block super-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i+1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tx1 * dx1;
a[i][j][0][1] = dt * tx2;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = 0.0;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] );
a[i][j][1][1] = dt * tx2
* ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
a[i][j][1][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2] * tmp1 ) );
a[i][j][1][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3] * tmp1 ) );
a[i][j][1][4] = dt * tx2 * C2 ;
a[i][j][2][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] );
a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 );
a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
a[i][j][2][3] = 0.0;
a[i][j][2][4] = 0.0;
a[i][j][3][0] = dt * tx2
* ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] );
a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 );
a[i][j][3][2] = 0.0;
a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
a[i][j][3][4] = 0.0;
a[i][j][4][0] = dt * tx2
* ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2
- C1 * ( u[i+1][j][k][4] * tmp1 ) )
* ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) )
- c1345 * tmp2 * u[i+1][j][k][4] );
a[i][j][4][1] = dt * tx2
* ( C1 * ( u[i+1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1]
+ u[i+1][j][k][2]*u[i+1][j][k][2]
+ u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1];
a[i][j][4][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2];
a[i][j][4][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3];
a[i][j][4][4] = dt * tx2
* ( C1 * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
/*--------------------------------------------------------------------
c form the second block super-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j+1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = dt * ty2
* ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] );
b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = dt * ty2
* ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] );
b[i][j][2][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1] * tmp1 ) );
b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][3] * tmp1 ) );
b[i][j][2][4] = dt * ty2 * C2;
b[i][j][3][0] = dt * ty2
* ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 );
b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = dt * ty2
* ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2
- C1 * ( u[i][j+1][k][4] * tmp1 ) )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) )
- ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) )
- ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) )
- c1345*tmp2*u[i][j+1][k][4] );
b[i][j][4][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1];
b[i][j][4][2] = dt * ty2
* ( C1 * ( u[i][j+1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j+1][k][1]*u[i][j+1][k][1]
+ 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2]
+ u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2];
b[i][j][4][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3];
b[i][j][4][4] = dt * ty2
* ( C1 * ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c form the third block super-diagonal
--------------------------------------------------------------------*/
tmp1 = 1.0 / u[i][j][k+1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tz1 * dz1;
c[i][j][0][1] = 0.0;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = dt * tz2;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = dt * tz2
* ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] );
c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
c[i][j][1][2] = 0.0;
c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 );
c[i][j][1][4] = 0.0;
c[i][j][2][0] = dt * tz2
* ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] );
c[i][j][2][1] = 0.0;
c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 );
c[i][j][2][4] = 0.0;
c[i][j][3][0] = dt * tz2
* ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] );
c[i][j][3][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1] * tmp1 ) );
c[i][j][3][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2] * tmp1 ) );
c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
c[i][j][3][4] = dt * tz2 * C2;
c[i][j][4][0] = dt * tz2
* ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2
- C1 * ( u[i][j][k+1][4] * tmp1 ) )
* ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) )
- ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) )
- c1345 * tmp2 * u[i][j][k+1][4] );
c[i][j][4][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1];
c[i][j][4][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];
c[i][j][4][3] = dt * tz2
* ( C1 * ( u[i][j][k+1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k+1][1]*u[i][j][k+1][1]
+ u[i][j][k+1][2]*u[i][j][k+1][2]
+ 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];
c[i][j][4][4] = dt * tz2
* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
/*--------------------------------------------------------------------
c To improve cache performance, the second and third dimensions are
c padded by 1 for even-number sizes only. The padding is needed only in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
c to compute the L2 (root-mean-square) norm of vector v over the interior points.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
double sum0=0.0, sum1=0.0, sum2=0.0, sum3=0.0, sum4=0.0;
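/* each thread accumulates private partial sums; they are combined under the
   critical section below and a single thread then normalizes to an RMS */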
#pragma omp single
for (m = 0; m < 5; m++) {
sum[m] = 0.0;
}
#pragma omp for nowait
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz0-2; k++) {
sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
}
}
}
#pragma omp critical(lu)
{
sum[0] += sum0;
sum[1] += sum1;
sum[2] += sum2;
sum[3] += sum3;
sum[4] += sum4;
}
#pragma omp barrier
#pragma omp single
for (m = 0; m < 5; m++) {
sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pintgr(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k;
int ibeg, ifin, ifin1;
int jbeg, jfin, jfin1;
int iglob, iglob1, iglob2;
int jglob, jglob1, jglob2;
double phi1[ISIZ2+2][ISIZ3+2]; /* phi1(0:isiz2+1,0:isiz3+1) */
double phi2[ISIZ2+2][ISIZ3+2]; /* phi2(0:isiz2+1,0:isiz3+1) */
double frc1, frc2, frc3;
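/* phi1 and phi2 hold the pressure-like quantity C2*(rho*E - 0.5*rho*|v|^2)
   on two opposing faces of the integration sub-domain; frc1..frc3 are
   trapezoidal-rule surface integrals over the three pairs of faces and frc is
   0.25 times their sum */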
/*--------------------------------------------------------------------
c set up the sub-domains for integration in each processor
--------------------------------------------------------------------*/
ibeg = nx;
ifin = 0;
iglob1 = -1;
iglob2 = nx-1;
if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
jbeg = ny;
jfin = -1;
jglob1 = 0;
jglob2 = ny-1;
if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
ifin1 = ifin;
jfin1 = jfin;
if (ifin1 == ii2) ifin1 = ifin -1;
if (jfin1 == ji2) jfin1 = jfin -1;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
for (i = ibeg; i <= ifin; i++) {
iglob = i;
for (j = jbeg; j <= jfin; j++) {
jglob = j;
k = ki1;
phi1[i][j] = C2*( u[i][j][k][4]
- 0.50 * ( pow2(u[i][j][k][1])
+ pow2(u[i][j][k][2])
+ pow2(u[i][j][k][3]) )
/ u[i][j][k][0] );
k = ki2;
phi2[i][j] = C2*( u[i][j][k][4]
- 0.50 * ( pow2(u[i][j][k][1])
+ pow2(u[i][j][k][2])
+ pow2(u[i][j][k][3]) )
/ u[i][j][k][0] );
}
}
frc1 = 0.0;
for (i = ibeg; i <= ifin1; i++) {
for (j = jbeg; j <= jfin1; j++) {
frc1 = frc1 + ( phi1[i][j]
+ phi1[i+1][j]
+ phi1[i][j+1]
+ phi1[i+1][j+1]
+ phi2[i][j]
+ phi2[i+1][j]
+ phi2[i][j+1]
+ phi2[i+1][j+1] );
}
}
frc1 = dxi * deta * frc1;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
jglob = jbeg;
if (jglob == ji1) {
for (i = ibeg; i <= ifin; i++) {
iglob = i;
for (k = ki1; k <= ki2; k++) {
phi1[i][k] = C2*( u[i][jbeg][k][4]
- 0.50 * ( pow2(u[i][jbeg][k][1])
+ pow2(u[i][jbeg][k][2])
+ pow2(u[i][jbeg][k][3]) )
/ u[i][jbeg][k][0] );
}
}
}
jglob = jfin;
if (jglob == ji2) {
for (i = ibeg; i <= ifin; i++) {
iglob = i;
for (k = ki1; k <= ki2; k++) {
phi2[i][k] = C2*( u[i][jfin][k][4]
- 0.50 * ( pow2(u[i][jfin][k][1])
+ pow2(u[i][jfin][k][2])
+ pow2(u[i][jfin][k][3]) )
/ u[i][jfin][k][0] );
}
}
}
frc2 = 0.0;
for (i = ibeg; i <= ifin1; i++) {
for (k = ki1; k <= ki2-1; k++) {
frc2 = frc2 + ( phi1[i][k]
+ phi1[i+1][k]
+ phi1[i][k+1]
+ phi1[i+1][k+1]
+ phi2[i][k]
+ phi2[i+1][k]
+ phi2[i][k+1]
+ phi2[i+1][k+1] );
}
}
frc2 = dxi * dzeta * frc2;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
for (i = 0; i <= ISIZ2+1; i++) {
for (k = 0; k <= ISIZ3+1; k++) {
phi1[i][k] = 0.0;
phi2[i][k] = 0.0;
}
}
iglob = ibeg;
if (iglob == ii1) {
for (j = jbeg; j <= jfin; j++) {
jglob = j;
for (k = ki1; k <= ki2; k++) {
phi1[j][k] = C2*( u[ibeg][j][k][4]
- 0.50 * ( pow2(u[ibeg][j][k][1])
+ pow2(u[ibeg][j][k][2])
+ pow2(u[ibeg][j][k][3]) )
/ u[ibeg][j][k][0] );
}
}
}
iglob = ifin;
if (iglob == ii2) {
for (j = jbeg; j <= jfin; j++) {
jglob = j;
for (k = ki1; k <= ki2; k++) {
phi2[j][k] = C2*( u[ifin][j][k][4]
- 0.50 * ( pow2(u[ifin][j][k][1])
+ pow2(u[ifin][j][k][2])
+ pow2(u[ifin][j][k][3]) )
/ u[ifin][j][k][0] );
}
}
}
frc3 = 0.0;
for (j = jbeg; j <= jfin1; j++) {
for (k = ki1; k <= ki2-1; k++) {
frc3 = frc3 + ( phi1[j][k]
+ phi1[j+1][k]
+ phi1[j][k+1]
+ phi1[j+1][k+1]
+ phi2[j][k]
+ phi2[j+1][k]
+ phi2[j][k+1]
+ phi2[j+1][k+1] );
}
}
frc3 = deta * dzeta * frc3;
frc = 0.25 * ( frc1 + frc2 + frc3 );
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void read_input(void) {
// FILE *fp;
/*--------------------------------------------------------------------
c if input file does not exist, it uses defaults
c ipr = 1 for detailed progress output
c inorm = how often the norm is printed (once every inorm iterations)
c itmax = number of pseudo time steps
c dt = time step
c omega = over-relaxation factor for SSOR
c tolrsd = steady state residual tolerance levels
c nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/
printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
" - LU Benchmark\n\n");
/* fp = fopen("inputlu.data", "r"); */
/* if (fp != NULL) { */
/* printf(" Reading from input file inputlu.data\n"); */
/* while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n'); */
/* fscanf(fp, "%d%d", &ipr, &inorm); */
/* while(fgetc(fp) != '\n'); */
/* while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n'); */
/* fscanf(fp, "%d", &itmax); */
/* while(fgetc(fp) != '\n'); */
/* while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n'); */
/* fscanf(fp, "%lf", &dt); */
/* while(fgetc(fp) != '\n'); */
/* while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n'); */
/* fscanf(fp, "%lf", &omega); */
/* while(fgetc(fp) != '\n'); */
/* while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n'); */
/* fscanf(fp, "%lf%lf%lf%lf%lf", */
/* &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]); */
/* while(fgetc(fp) != '\n'); */
/* while(fgetc(fp) != '\n'); while(fgetc(fp) != '\n'); */
/* fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0); */
/* while(fgetc(fp) != '\n'); */
/* fclose(fp); */
/* } else { */
ipr = IPR_DEFAULT;
inorm = INORM_DEFAULT;
itmax = ITMAX_DEFAULT;
dt = DT_DEFAULT;
omega = OMEGA_DEFAULT;
tolrsd[0] = TOLRSD1_DEF;
tolrsd[1] = TOLRSD2_DEF;
tolrsd[2] = TOLRSD3_DEF;
tolrsd[3] = TOLRSD4_DEF;
tolrsd[4] = TOLRSD5_DEF;
nx0 = ISIZ1;
ny0 = ISIZ2;
nz0 = ISIZ3;
// }
/*--------------------------------------------------------------------
c check problem size
--------------------------------------------------------------------*/
if ( nx0 < 5 || ny0 < 5 || nz0 < 5 ) {
printf(" PROBLEM SIZE IS TOO SMALL - \n"
" SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
exit(1);
}
if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
printf(" PROBLEM SIZE IS TOO LARGE - \n"
" NX, NY AND NZ SHOULD BE EQUAL TO \n"
" ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
exit(1);
}
printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
printf(" Iterations: %3d\n", itmax);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
c compute the right hand sides
--------------------------------------------------------------------*/
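/* rsd is initialized to -frct; the xi-, eta- and zeta-direction sweeps below
   then accumulate the central convective differences (tx2/ty2/tz2 terms), the
   viscous flux differences plus added second differences (tx3*C3*C4 and
   dx*tx1 terms), and the fourth-order dissipation (dssp terms) */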
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
#pragma omp for
for (i = 0; i <= nx-1; i++) {
for (j = 0; j <= ny-1; j++) {
for (k = 0; k <= nz-1; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = - frct[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp for
for (i = L1; i <= L2; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = u[i][j][k][1];
u21 = u[i][j][k][1] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 *
( u[i][j][k][4] - q );
flux[i][j][k][2] = u[i][j][k][2] * u21;
flux[i][j][k][3] = u[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp for
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (i = ist; i <= iend; i++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
L2 = nx-1;
for (i = ist; i <= L2; i++) {
tmp = 1.0 / u[i][j][k][0];
u21i = tmp * u[i][j][k][1];
u31i = tmp * u[i][j][k][2];
u41i = tmp * u[i][j][k][3];
u51i = tmp * u[i][j][k][4];
tmp = 1.0 / u[i-1][j][k][0];
u21im1 = tmp * u[i-1][j][k][1];
u31im1 = tmp * u[i-1][j][k][2];
u41im1 = tmp * u[i-1][j][k][3];
u51im1 = tmp * u[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )
- ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )
+ (1.0/6.0)
* tx3 * ( pow2(u21i) - pow2(u21im1) )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
for (i = ist; i <= iend; i++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dx1 * tx1 * ( u[i-1][j][k][0]
- 2.0 * u[i][j][k][0]
+ u[i+1][j][k][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( u[i-1][j][k][1]
- 2.0 * u[i][j][k][1]
+ u[i+1][j][k][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( u[i-1][j][k][2]
- 2.0 * u[i][j][k][2]
+ u[i+1][j][k][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( u[i-1][j][k][3]
- 2.0 * u[i][j][k][3]
+ u[i+1][j][k][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( u[i-1][j][k][4]
- 2.0 * u[i][j][k][4]
+ u[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[1][j][k][m] = rsd[1][j][k][m]
- dssp * ( + 5.0 * u[1][j][k][m]
- 4.0 * u[2][j][k][m]
+ u[3][j][k][m] );
rsd[2][j][k][m] = rsd[2][j][k][m]
- dssp * ( - 4.0 * u[1][j][k][m]
+ 6.0 * u[2][j][k][m]
- 4.0 * u[3][j][k][m]
+ u[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <= iend1; i++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i-2][j][k][m]
- 4.0 * u[i-1][j][k][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i+1][j][k][m]
+ u[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]
- dssp * ( u[nx-5][j][k][m]
- 4.0 * u[nx-4][j][k][m]
+ 6.0 * u[nx-3][j][k][m]
- 4.0 * u[nx-2][j][k][m] );
rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]
- dssp * ( u[nx-4][j][k][m]
- 4.0 * u[nx-3][j][k][m]
+ 5.0 * u[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = L1; j <= L2; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = u[i][j][k][2];
u31 = u[i][j][k][2] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u31;
flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);
flux[i][j][k][3] = u[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp for
for (i = ist; i <= iend; i++) {
for (k = 1; k <= nz - 2; k++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
L2 = ny-1;
for (j = jst; j <= L2; j++) {
tmp = 1.0 / u[i][j][k][0];
u21j = tmp * u[i][j][k][1];
u31j = tmp * u[i][j][k][2];
u41j = tmp * u[i][j][k][3];
u51j = tmp * u[i][j][k][4];
tmp = 1.0 / u[i][j-1][k][0];
u21jm1 = tmp * u[i][j-1][k][1];
u31jm1 = tmp * u[i][j-1][k][2];
u41jm1 = tmp * u[i][j-1][k][3];
u51jm1 = tmp * u[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )
- ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )
+ (1.0/6.0)
* ty3 * ( pow2(u31j) - pow2(u31jm1) )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j <= jend; j++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dy1 * ty1 * ( u[i][j-1][k][0]
- 2.0 * u[i][j][k][0]
+ u[i][j+1][k][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( u[i][j-1][k][1]
- 2.0 * u[i][j][k][1]
+ u[i][j+1][k][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( u[i][j-1][k][2]
- 2.0 * u[i][j][k][2]
+ u[i][j+1][k][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( u[i][j-1][k][3]
- 2.0 * u[i][j][k][3]
+ u[i][j+1][k][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( u[i][j-1][k][4]
- 2.0 * u[i][j][k][4]
+ u[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[i][1][k][m] = rsd[i][1][k][m]
- dssp * ( + 5.0 * u[i][1][k][m]
- 4.0 * u[i][2][k][m]
+ u[i][3][k][m] );
rsd[i][2][k][m] = rsd[i][2][k][m]
- dssp * ( - 4.0 * u[i][1][k][m]
+ 6.0 * u[i][2][k][m]
- 4.0 * u[i][3][k][m]
+ u[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i][j-2][k][m]
- 4.0 * u[i][j-1][k][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i][j+1][k][m]
+ u[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]
- dssp * ( u[i][ny-5][k][m]
- 4.0 * u[i][ny-4][k][m]
+ 6.0 * u[i][ny-3][k][m]
- 4.0 * u[i][ny-2][k][m] );
rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]
- dssp * ( u[i][ny-4][k][m]
- 4.0 * u[i][ny-3][k][m]
+ 5.0 * u[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = u[i][j][k][3];
u41 = u[i][j][k][3] / u[i][j][k][0];
q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
/ u[i][j][k][0];
flux[i][j][k][1] = u[i][j][k][1] * u41;
flux[i][j][k][2] = u[i][j][k][2] * u41;
flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);
flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;
}
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / u[i][j][k][0];
u21k = tmp * u[i][j][k][1];
u31k = tmp * u[i][j][k][2];
u41k = tmp * u[i][j][k][3];
u51k = tmp * u[i][j][k][4];
tmp = 1.0 / u[i][j][k-1][0];
u21km1 = tmp * u[i][j][k-1][1];
u31km1 = tmp * u[i][j][k-1][2];
u41km1 = tmp * u[i][j][k-1][3];
u51km1 = tmp * u[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )
- ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )
+ (1.0/6.0)
* tz3 * ( pow2(u41k) - pow2(u41km1) )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k <= nz - 2; k++) {
rsd[i][j][k][0] = rsd[i][j][k][0]
+ dz1 * tz1 * ( u[i][j][k-1][0]
- 2.0 * u[i][j][k][0]
+ u[i][j][k+1][0] );
rsd[i][j][k][1] = rsd[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( u[i][j][k-1][1]
- 2.0 * u[i][j][k][1]
+ u[i][j][k+1][1] );
rsd[i][j][k][2] = rsd[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( u[i][j][k-1][2]
- 2.0 * u[i][j][k][2]
+ u[i][j][k+1][2] );
rsd[i][j][k][3] = rsd[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( u[i][j][k-1][3]
- 2.0 * u[i][j][k][3]
+ u[i][j][k+1][3] );
rsd[i][j][k][4] = rsd[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( u[i][j][k-1][4]
- 2.0 * u[i][j][k][4]
+ u[i][j][k+1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
rsd[i][j][1][m] = rsd[i][j][1][m]
- dssp * ( + 5.0 * u[i][j][1][m]
- 4.0 * u[i][j][2][m]
+ u[i][j][3][m] );
rsd[i][j][2][m] = rsd[i][j][2][m]
- dssp * ( - 4.0 * u[i][j][1][m]
+ 6.0 * u[i][j][2][m]
- 4.0 * u[i][j][3][m]
+ u[i][j][4][m] );
}
for (k = 3; k <= nz - 4; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = rsd[i][j][k][m]
- dssp * ( u[i][j][k-2][m]
- 4.0 * u[i][j][k-1][m]
+ 6.0 * u[i][j][k][m]
- 4.0 * u[i][j][k+1][m]
+ u[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]
- dssp * ( u[i][j][nz-5][m]
- 4.0 * u[i][j][nz-4][m]
+ 6.0 * u[i][j][nz-3][m]
- 4.0 * u[i][j][nz-2][m] );
rsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]
- dssp * ( u[i][j][nz-4][m]
- 4.0 * u[i][j][nz-3][m]
+ 5.0 * u[i][j][nz-2][m] );
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setbv(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
c set the boundary values of dependent variables
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k;
int iglob, jglob;
/*--------------------------------------------------------------------
c set the dependent variable values along the top and bottom faces
--------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
for (j = 0; j < ny; j++) {
jglob = j;
exact( iglob, jglob, 0, &u[i][j][0][0] );
exact( iglob, jglob, nz-1, &u[i][j][nz-1][0] );
}
}
/*--------------------------------------------------------------------
c set the dependent variable values along north and south faces
--------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
for (k = 0; k < nz; k++) {
exact( iglob, 0, k, &u[i][0][k][0] );
}
}
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
for (k = 0; k < nz; k++) {
exact( iglob, ny0-1, k, &u[i][ny-1][k][0] );
}
}
/*--------------------------------------------------------------------
c set the dependent variable values along east and west faces
--------------------------------------------------------------------*/
#pragma omp for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 0; k < nz; k++) {
exact( 0, jglob, k, &u[0][j][k][0] );
}
}
#pragma omp for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 0; k < nz; k++) {
exact( nx0-1, jglob, k, &u[nx-1][j][k][0] );
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setcoeff(void) {
/*--------------------------------------------------------------------
c set up coefficients
--------------------------------------------------------------------*/
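/* dxi, deta, dzeta are the mesh spacings on the unit cube; tx1..tz1 = 1/h^2,
   tx2..tz2 = 1/(2h) and tx3..tz3 = 1/h are the factors applied to the
   second-difference, central-difference and one-sided-difference terms */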
dxi = 1.0 / ( nx0 - 1 );
deta = 1.0 / ( ny0 - 1 );
dzeta = 1.0 / ( nz0 - 1 );
tx1 = 1.0 / ( dxi * dxi );
tx2 = 1.0 / ( 2.0 * dxi );
tx3 = 1.0 / dxi;
ty1 = 1.0 / ( deta * deta );
ty2 = 1.0 / ( 2.0 * deta );
ty3 = 1.0 / deta;
tz1 = 1.0 / ( dzeta * dzeta );
tz2 = 1.0 / ( 2.0 * dzeta );
tz3 = 1.0 / dzeta;
ii1 = 1;
ii2 = nx0 - 2;
ji1 = 1;
ji2 = ny0 - 3;
ki1 = 2;
ki2 = nz0 - 2;
/*--------------------------------------------------------------------
c diffusion coefficients
--------------------------------------------------------------------*/
dx1 = 0.75;
dx2 = dx1;
dx3 = dx1;
dx4 = dx1;
dx5 = dx1;
dy1 = 0.75;
dy2 = dy1;
dy3 = dy1;
dy4 = dy1;
dy5 = dy1;
dz1 = 1.00;
dz2 = dz1;
dz3 = dz1;
dz4 = dz1;
dz5 = dz1;
/*--------------------------------------------------------------------
c fourth difference dissipation
--------------------------------------------------------------------*/
dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the first pde
--------------------------------------------------------------------*/
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 5.0e-01;
ce[0][7] = 2.0e-02;
ce[0][8] = 1.0e-02;
ce[0][9] = 3.0e-02;
ce[0][10] = 5.0e-01;
ce[0][11] = 4.0e-01;
ce[0][12] = 3.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the second pde
--------------------------------------------------------------------*/
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 1.0e-02;
ce[1][8] = 3.0e-02;
ce[1][9] = 2.0e-02;
ce[1][10] = 4.0e-01;
ce[1][11] = 3.0e-01;
ce[1][12] = 5.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the third pde
--------------------------------------------------------------------*/
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 4.0e-02;
ce[2][8] = 3.0e-02;
ce[2][9] = 5.0e-02;
ce[2][10] = 3.0e-01;
ce[2][11] = 5.0e-01;
ce[2][12] = 4.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the fourth pde
--------------------------------------------------------------------*/
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 3.0e-02;
ce[3][8] = 5.0e-02;
ce[3][9] = 4.0e-02;
ce[3][10] = 2.0e-01;
ce[3][11] = 1.0e-01;
ce[3][12] = 3.0e-01;
/*--------------------------------------------------------------------
c coefficients of the exact solution to the fifth pde
--------------------------------------------------------------------*/
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 1.0e-01;
ce[4][5] = 4.0e-01;
ce[4][6] = 3.0e-01;
ce[4][7] = 5.0e-02;
ce[4][8] = 4.0e-02;
ce[4][9] = 3.0e-02;
ce[4][10] = 1.0e-01;
ce[4][11] = 3.0e-01;
ce[4][12] = 2.0e-01;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void setiv(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
c
c set the initial values of independent variables based on tri-linear
c interpolation of boundary values in the computational space.
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
double xi, eta, zeta;
double pxi, peta, pzeta;
double ue_1jk[5],ue_nx0jk[5],ue_i1k[5],
ue_iny0k[5],ue_ij1[5],ue_ijnz[5];
#pragma omp for
for (j = 0; j < ny; j++) {
jglob = j;
for (k = 1; k < nz - 1; k++) {
zeta = ((double)k) / (nz-1);
if (jglob != 0 && jglob != ny0-1) {
eta = ( (double) (jglob) ) / (ny0-1);
for (i = 0; i < nx; i++) {
iglob = i;
if(iglob != 0 && iglob != nx0-1) {
xi = ( (double) (iglob) ) / (nx0-1);
exact (0,jglob,k,ue_1jk);
exact (nx0-1,jglob,k,ue_nx0jk);
exact (iglob,0,k,ue_i1k);
exact (iglob,ny0-1,k,ue_iny0k);
exact (iglob,jglob,0,ue_ij1);
exact (iglob,jglob,nz-1,ue_ijnz);
for (m = 0; m < 5; m++) {
pxi = ( 1.0 - xi ) * ue_1jk[m]
+ xi * ue_nx0jk[m];
peta = ( 1.0 - eta ) * ue_i1k[m]
+ eta * ue_iny0k[m];
pzeta = ( 1.0 - zeta ) * ue_ij1[m]
+ zeta * ue_ijnz[m];
u[i][j][k][m] = pxi + peta + pzeta
- pxi * peta - peta * pzeta - pzeta * pxi
+ pxi * peta * pzeta;
}
}
}
}
}
}
}
}
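/*--------------------------------------------------------------------
c   illustrative sketch (not part of the benchmark): the update above is
c   a Boolean-sum (transfinite) blend of three one-dimensional linear
c   interpolants of the boundary values.  The standalone helper below
c   shows the same formula for a single scalar; its name and argument
c   order are hypothetical.
--------------------------------------------------------------------*/
static double sketch_trilinear_blend(double xi, double eta, double zeta,
                                     double u_x0, double u_x1,
                                     double u_y0, double u_y1,
                                     double u_z0, double u_z1) {
  double pxi   = (1.0 - xi)   * u_x0 + xi   * u_x1;
  double peta  = (1.0 - eta)  * u_y0 + eta  * u_y1;
  double pzeta = (1.0 - zeta) * u_z0 + zeta * u_z1;
/*--------------------------------------------------------------------
c   the Boolean sum reproduces the boundary data on all six faces
--------------------------------------------------------------------*/
  return pxi + peta + pzeta
       - pxi * peta - peta * pzeta - pzeta * pxi
       + pxi * peta * pzeta;
}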
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void ssor(void) {
/*--------------------------------------------------------------------
c to perform pseudo-time stepping SSOR iterations
c for the five nonlinear pde's.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int istep;
double tmp;
double delunm[5], tv[ISIZ1][ISIZ2][5];
/*--------------------------------------------------------------------
c begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
tmp = 1.0 / ( omega * ( 2.0 - omega ) ) ;
/*--------------------------------------------------------------------
c initialize a,b,c,d to zero (guarantees that page tables have been
c formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/
#pragma omp parallel private(i,j,k,m)
{
#pragma omp for
for (i = 0; i < ISIZ1; i++) {
for (j = 0; j < ISIZ2; j++) {
for (k = 0; k < 5; k++) {
for (m = 0; m < 5; m++) {
a[i][j][k][m] = 0.0;
b[i][j][k][m] = 0.0;
c[i][j][k][m] = 0.0;
d[i][j][k][m] = 0.0;
}
}
}
}
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
rhs();
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, rsdnm );
timer_clear(1);
timer_start(1);
/*--------------------------------------------------------------------
c the timestep loop
--------------------------------------------------------------------*/
for (istep = 1; istep <= itmax; istep++) {
if (istep%20 == 0 || istep == itmax || istep == 1) {
#pragma omp master
printf(" Time step %4d\n", istep);
}
#pragma omp parallel private(istep,i,j,k,m)
{
/*--------------------------------------------------------------------
c perform SSOR iteration
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = dt * rsd[i][j][k][m];
}
}
}
}
for (k = 1; k <= nz - 2; k++) {
/*--------------------------------------------------------------------
c form the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
jacld(k);
/*--------------------------------------------------------------------
c perform the lower triangular solution
--------------------------------------------------------------------*/
blts(nx, ny, nz, k,
omega,
rsd,
a, b, c, d,
ist, iend, jst, jend,
nx0, ny0 );
}
#pragma omp barrier
for (k = nz - 2; k >= 1; k--) {
/*--------------------------------------------------------------------
c form the strictly upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
jacu(k);
/*--------------------------------------------------------------------
c perform the upper triangular solution
--------------------------------------------------------------------*/
buts(nx, ny, nz, k,
omega,
rsd, tv,
d, a, b, c,
ist, iend, jst, jend,
nx0, ny0 );
}
#pragma omp barrier
/*--------------------------------------------------------------------
c update the variables
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz-2; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = u[i][j][k][m]
+ tmp * rsd[i][j][k][m];
}
}
}
}
} /* end parallel */
/*--------------------------------------------------------------------
c compute the max-norms of newton iteration corrections
--------------------------------------------------------------------*/
if ( istep % inorm == 0 ) {
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, delunm );
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
rhs();
/*--------------------------------------------------------------------
c compute the max-norms of newton iteration residuals
--------------------------------------------------------------------*/
if ( ( istep % inorm == 0 ) ||
( istep == itmax ) ) {
l2norm( nx0, ny0, nz0,
ist, iend, jst, jend,
rsd, rsdnm );
}
/*--------------------------------------------------------------------
c check the newton-iteration residuals against the tolerance levels
--------------------------------------------------------------------*/
if ( ( rsdnm[0] < tolrsd[0] ) &&
( rsdnm[1] < tolrsd[1] ) &&
( rsdnm[2] < tolrsd[2] ) &&
( rsdnm[3] < tolrsd[3] ) &&
( rsdnm[4] < tolrsd[4] ) ) {
exit(1);
}
}
timer_stop(1);
maxtime= timer_read(1);
}
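/*--------------------------------------------------------------------
c   illustrative sketch (not part of the benchmark): one SSOR step above
c   consists of a lower-triangular (forward) sweep, an upper-triangular
c   (backward) sweep, and an update of u scaled by 1/(omega*(2-omega)).
c   The standalone helper below applies plain SSOR to the 1-D model
c   problem -u[i-1] + 2 u[i] - u[i+1] = f[i], purely to illustrate the
c   forward/backward relaxation pattern; all names are hypothetical.
--------------------------------------------------------------------*/
static void sketch_ssor_1d(double *u, const double *f, int n,
                           double omega, int sweeps) {
  int i, s;
  for (s = 0; s < sweeps; s++) {
/*--------------------------------------------------------------------
c   forward Gauss-Seidel sweep with relaxation factor omega
--------------------------------------------------------------------*/
    for (i = 1; i < n - 1; i++) {
      double gs = 0.5 * (u[i-1] + u[i+1] + f[i]);
      u[i] = (1.0 - omega) * u[i] + omega * gs;
    }
/*--------------------------------------------------------------------
c   backward sweep completes the symmetric iteration
--------------------------------------------------------------------*/
    for (i = n - 2; i >= 1; i--) {
      double gs = 0.5 * (u[i-1] + u[i+1] + f[i]);
      u[i] = (1.0 - omega) * u[i] + omega * gs;
    }
  }
}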
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(double xcr[5], double xce[5], double xci,
char *class, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine
--------------------------------------------------------------------*/
double xcrref[5],xceref[5],xciref,
xcrdif[5],xcedif[5],xcidif,
epsilon, dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
--------------------------------------------------------------------*/
epsilon = 1.0e-08;
*class = 'U';
*verified = TRUE;
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
xciref = 1.0;
if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {
*class = 'S';
dtref = 5.0e-1;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xcrref[0] = 1.6196343210976702e-02;
xcrref[1] = 2.1976745164821318e-03;
xcrref[2] = 1.5179927653399185e-03;
xcrref[3] = 1.5029584435994323e-03;
xcrref[4] = 3.4264073155896461e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xceref[0] = 6.4223319957960924e-04;
xceref[1] = 8.4144342047347926e-05;
xceref[2] = 5.8588269616485186e-05;
xceref[3] = 5.8474222595157350e-05;
xceref[4] = 1.3103347914111294e-03;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (12X12X12) grid,
c after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
xciref = 7.8418928865937083;
} else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {
*class = 'W'; /* SPEC95fp size */
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (33x33x33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xcrref[0] = 0.1236511638192e+02;
xcrref[1] = 0.1317228477799e+01;
xcrref[2] = 0.2550120713095e+01;
xcrref[3] = 0.2326187750252e+01;
xcrref[4] = 0.2826799444189e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (33X33X33) grid,
--------------------------------------------------------------------*/
xceref[0] = 0.4867877144216;
xceref[1] = 0.5064652880982e-01;
xceref[2] = 0.9281818101960e-01;
xceref[3] = 0.8570126542733e-01;
xceref[4] = 0.1084277417792e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (33X33X33) grid,
c after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
xciref = 0.1161399311023e+02;
} else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) {
*class = 'A';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 7.7902107606689367e+02;
xcrref[1] = 6.3402765259692870e+01;
xcrref[2] = 1.9499249727292479e+02;
xcrref[3] = 1.7845301160418537e+02;
xcrref[4] = 1.8384760349464247e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 2.9964085685471943e+01;
xceref[1] = 2.8194576365003349;
xceref[2] = 7.3473412698774742;
xceref[3] = 6.7139225687777051;
xceref[4] = 7.0715315688392578e+01;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (64X64X64) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 2.6030925604886277e+01;
} else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) {
*class = 'B';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 3.5532672969982736e+03;
xcrref[1] = 2.6214750795310692e+02;
xcrref[2] = 8.8333721850952190e+02;
xcrref[3] = 7.7812774739425265e+02;
xcrref[4] = 7.3087969592545314e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (102X102X102)
c grid, after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 1.1401176380212709e+02;
xceref[1] = 8.1098963655421574;
xceref[2] = 2.8480597317698308e+01;
xceref[3] = 2.5905394567832939e+01;
xceref[4] = 2.6054907504857413e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (102X102X102) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 4.7887162703308227e+01;
} else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) {
*class = 'C';
dtref = 2.0e+0;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xcrref[0] = 1.03766980323537846e+04;
xcrref[1] = 8.92212458801008552e+02;
xcrref[2] = 2.56238814582660871e+03;
xcrref[3] = 2.19194343857831427e+03;
xcrref[4] = 1.78078057261061185e+04;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error, for the (162X162X162)
c grid, after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xceref[0] = 2.15986399716949279e+02;
xceref[1] = 1.55789559239863600e+01;
xceref[2] = 5.41318863077207766e+01;
xceref[3] = 4.82262643154045421e+01;
xceref[4] = 4.55902910043250358e+02;
/*--------------------------------------------------------------------
c Reference value of surface integral, for the (162X162X162) grid,
c after 250 time steps, with DT = 2.0d+00
--------------------------------------------------------------------*/
xciref = 6.66404553572181300e+01;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12, 33X33X33,
c 64X64X64, 102X102X102, or 162X162X162
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
xcidif = fabs((xci - xciref)/xciref);
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
if (*class != 'U') {
printf("\n Verification being performed for class %1c\n", *class);
printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*class = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d %20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
}
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d %20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
} else {
printf(" %2d %20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
}
}
if (*class != 'U') {
printf(" Comparison of surface integral\n");
} else {
printf(" Surface integral\n");
}
if (*class == 'U') {
printf(" %20.13e\n", xci);
} else if (xcidif > epsilon) {
*verified = FALSE;
printf(" FAILURE: %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
} else {
printf(" %20.13e%20.13e%20.13e\n",
xci, xciref, xcidif);
}
if (*class == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
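/*--------------------------------------------------------------------
c   illustrative sketch (not part of the benchmark): the pass/fail test
c   above is a relative-difference check of each computed norm against
c   its reference value with tolerance epsilon = 1.0e-08.  The helper
c   below shows the same test for a single value; its name is
c   hypothetical (fabs() comes from <math.h>, already used above).
--------------------------------------------------------------------*/
static int sketch_within_tolerance(double computed, double reference,
                                   double epsilon) {
  double dif = fabs((computed - reference) / reference);
  return dif <= epsilon;   /* 1 = verified, 0 = FAILURE */
}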
|
distributiongenerator.h | // @file distributiongenerator.h This code provides basic structure for
// distribution generators. This should be inherited by all other distribution
// generators.
// @author TPOC: contact@palisade-crypto.org
//
// @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. THIS SOFTWARE IS
// PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#define LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#include <chrono>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include "math/backend.h"
#include "utils/prng/blake2engine.h"
// #define FIXED_SEED // if defined, then uses a fixed seed number for
// reproducible results during debug. Use only one OMP thread to ensure
// reproducibility
namespace lbcrypto {
// Defines the PRNG implementation used by PALISADE.
// The cryptographically secure PRNG used by PALISADE is based on BLAKE2 hash
// functions. A user can replace it with a different PRNG if desired by defining
// the same methods as for the Blake2Engine class.
typedef Blake2Engine PRNG;
/**
* @brief The class providing the PRNG capability to all random distribution
* generators in PALISADE. The security of Ring Learning With Errors (used for
* all crypto capabilities in PALISADE) depends on the randomness of uniform,
* ternary, and Gaussian distributions, which derive their randomness from the
* PRNG.
*/
class PseudoRandomNumberGenerator {
public:
/**
* @brief Initializes the PRNG engine for each OpenMP thread
*/
static void InitPRNG() {
int threads = PalisadeParallelControls.GetNumThreads();
if (threads == 0) {
threads = 1;
}
#pragma omp parallel for num_threads(threads)
for (int i = 0; i < threads; ++i) {
GetPRNG();
}
}
static PRNG &GetPRNG() {
// initialization of PRNGs
if (m_prng == nullptr) {
#pragma omp critical
{
#if defined(FIXED_SEED)
// Only used for debugging in the single-threaded mode.
std::cerr << "**FOR DEBUGGING ONLY!!!! Using fixed initializer for "
"PRNG. Use a single thread only, e.g., OMP_NUM_THREADS=1!"
<< std::endl;
std::array<uint32_t, 16> seed{};
seed[0] = 1;
m_prng = std::make_shared<PRNG>(seed);
#else
// A 512-bit seed is generated for each thread (this roughly corresponds
// to 256 bits of security). The seed is the sum of a random sample
// generated using std::random_device (typically works correctly in
// Linux, MacOS X, and MinGW starting with GCC 9.2) and a BLAKE2 sample
// seeded from current time stamp, a hash of the current thread, and a
// memory location of a heap variable. The BLAKE2 sample is added in
// case random_device is deterministic (happens on MinGW with GCC
// below 9.2). All future calls to PRNG use the seed generated here.
// The code below derives randomness from time, thread id, and a memory
// location of a heap variable. This seed is relevant only if the
// implementation of random_device is deterministic (as in older
// versions of GCC in MinGW)
std::array<uint32_t, 16> initKey{};
// high-resolution clock typically has a nanosecond tick period
// Arguably this may give up to 32 bits of entropy as the clock gets
// recycled every 4.3 seconds
initKey[0] = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
// A thread id is often close to being random (on most systems)
initKey[1] = std::hash<std::thread::id>{}(std::this_thread::get_id());
// On a 64-bit machine, the thread id is 64 bits long
// skip on 32-bit arm architectures
#if !defined(__arm__) && !defined(__EMSCRIPTEN__)
if (sizeof(size_t) == 8)
initKey[2] =
(std::hash<std::thread::id>{}(std::this_thread::get_id()) >> 32);
#endif
// heap variable; we are going to use the least significant 32 bits of its
// memory location as the counter for BLAKE2. This will increase the entropy
// of the BLAKE2 sample
void *mem = malloc(1);
free(mem);
uint32_t counter = reinterpret_cast<long long>(mem);
PRNG gen(initKey, counter);
std::uniform_int_distribution<uint32_t> distribution(0);
std::array<uint32_t, 16> seed{};
for (uint32_t i = 0; i < 16; i++) {
seed[i] = distribution(gen);
}
std::array<uint32_t, 16> rdseed{};
size_t attempts = 3;
bool rdGenPassed = false;
size_t idx = 0;
while (!rdGenPassed && idx < attempts) {
try {
std::random_device genR;
for (uint32_t i = 0; i < 16; i++) {
// we use the fact that there is no overflow for unsigned integers
// (from C++ standard) i.e., arithmetic mod 2^32 is performed. For
// the seed to be random, it is sufficient for one of the two
// samples below to be random. In almost all practical cases,
// distribution(genR) is random. We add distribution(gen) just in
// case there is an implementation issue with random_device (as in
// older MinGW systems).
rdseed[i] = distribution(genR);
}
rdGenPassed = true;
} catch (std::exception &e) {
}
idx++;
}
for (uint32_t i = 0; i < 16; i++) {
seed[i] += rdseed[i];
}
m_prng = std::make_shared<PRNG>(seed);
#endif
}
}
return *m_prng;
}
private:
// shared pointer to a thread-specific PRNG engine
static std::shared_ptr<PRNG> m_prng;
#if !defined(FIXED_SEED)
// avoid contention on m_prng
// local copies of m_prng are created for each thread
#pragma omp threadprivate(m_prng)
#endif
};
/**
* @brief Abstract class describing generator requirements.
*
* The Distribution Generator defines the methods that must be implemented by a
* real generator. It also holds the single PRNG, which should be used by all
* child classes whenever a random number is required.
*
*/
template <typename VecType>
class DistributionGenerator {
public:
DistributionGenerator() {}
virtual ~DistributionGenerator() {}
};
} // namespace lbcrypto
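// Minimal usage sketch (not part of PALISADE): assuming Blake2Engine meets
// the C++ UniformRandomBitGenerator requirements -- its use above with
// std::uniform_int_distribution suggests it does -- any standard distribution
// can draw from the per-thread engine returned by GetPRNG().  The function
// name below is hypothetical; modulus must be greater than zero.
inline uint32_t SketchSampleUniform(uint32_t modulus) {
  std::uniform_int_distribution<uint32_t> dist(0, modulus - 1);
  return dist(lbcrypto::PseudoRandomNumberGenerator::GetPRNG());
}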
#endif // LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp; it is a workaround
/// to ensure that ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
void MaybeDestroyTemplateIds() {
if (!TemplateIds.empty() &&
(Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
Parser &Self;
DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
if (!Tok.getAnnotationValue())
return TypeError();
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, TypeResult T) {
assert((T.isInvalid() || T.get()) &&
"produced a valid-but-null type annotation?");
Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) &&
NextToken().is(tok::coloncolon)) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
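/// For example (illustrative only):
///
///   {
///     RevertingTentativeParsingAction TPA(*this);
///     // ... speculatively consume tokens to disambiguate ...
///   } // destructor calls Revert(), restoring the token position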
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
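// Illustrative call (the error-handling shown is a sketch of a typical
// statement-parsing caller, not a requirement):
//
//   if (ExpectAndConsume(tok::l_paren))  // diagnostic already emitted
//     return StmtError();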
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
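// Illustrative call site (LParenLoc is a hypothetical location captured by
// the caller): after consuming the '(' that opens a statement-expression,
// warn if the token pair was written in a surprising way.
//
//   checkCompoundToken(LParenLoc, tok::l_paren, CompoundToken::StmtExprBegin);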
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
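// Typical usage sketch (the scope flags shown are an assumption about the
// caller's context, not mandated by this class):
//
//   {
//     ParseScope CompoundScope(this, Scope::DeclScope |
//                                        Scope::CompoundStmtScope);
//     // ... parse the contents of the block ...
//   }   // scope exited automatically; call CompoundScope.Exit() to leave early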
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
Parser &Self;
unsigned NumScopes = 0;
MultiParseScope(const MultiParseScope&) = delete;
public:
MultiParseScope(Parser &Self) : Self(Self) {}
void Enter(unsigned ScopeFlags) {
Self.EnterScope(ScopeFlags);
++NumScopes;
}
void Exit() {
while (NumScopes) {
Self.ExitScope();
--NumScopes;
}
}
~MultiParseScope() {
Exit();
}
};
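// Sketch of the intended pattern (illustrative; D is a hypothetical local
// naming the possibly-templated declaration, and ReenterTemplateScopes is
// declared below). Zero or more scopes may be entered:
//
//   MultiParseScope TemplateParamScopes(*this);
//   ReenterTemplateScopes(TemplateParamScopes, D);
//   // ... parse using the re-entered template scopes ...
//   // every scope entered above is exited when TemplateParamScopes dies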
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
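// Example of combining flags with the operator| defined above (illustrative):
//
//   // Skip to the matching ')' without consuming it, but give up at a ';'.
//   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);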
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no MisleadingIndentationChecker
/// active on an else, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser *Self;
/// Method - The method declaration.
Decl *Method;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - While parsing a top-level (non-nested)
/// C++ class, method declarations (and any attached inline definitions) that
/// contain parts which cannot be parsed until the definition is complete
/// (C++ [class.mem]p2) are stored here, together with the cached tokens that
/// will later be parsed to create those entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), IsInterface(IsInterface),
TagOrTemplate(TagOrTemplate) {}
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
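// Usage sketch (TagDecl and the surrounding member parsing are assumptions
// about the caller, not requirements of this RAII object):
//
//   ParsingClassDefinition ParsingDef(*this, TagDecl, /*TopLevelClass=*/true,
//                                     /*IsInterface=*/false);
//   // ... parse the member-specification ...
//   ParsingDef.Pop();   // or let the destructor pop the class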
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
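// Illustrative construction of the three forms (the parameter-list object
// and source locations are hypothetical locals):
//
//   ParsedTemplateInfo NonTmpl;                          // NonTemplate
//   ParsedTemplateInfo Tmpl(&ParamLists, /*isSpecialization=*/false);
//   ParsedTemplateInfo Inst(ExternLoc, TemplateLoc);     // ExplicitInstantiation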
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
llvm::function_ref<void(const Designation &)> CodeCompleteCB);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used).
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
SourceLocation *LParenLoc = nullptr,
SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// How this __if_exists or __if_not_exists block should behave.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (e.g., because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
return AllowDefiningTypeSpec::Yes;
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
return AllowDefiningTypeSpec::YesButInvalid;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
return AllowDefiningTypeSpec::NoButErrorRecovery;
case DeclSpecContext::DSC_trailing:
return AllowDefiningTypeSpec::No;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
return true;
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier that isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration and an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration and an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration and an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration and an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
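// Sketch of the standard pattern for the Try* helpers above (illustrative
// only): wrap the call in a reverting tentative parse so the consumed tokens
// are rewound afterwards.
//
//   {
//     RevertingTentativeParsingAction TPA(*this);
//     if (TryParseDeclarator(/*mayBeAbstract=*/false) == TPResult::Error)
//       return TPResult::Error;
//   }   // tokens consumed by TryParseDeclarator are rewound here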
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we do not support yet, for example, attributes
// appertaining to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
return true;
}
return false;
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute if the language is OpenCL v2.0
/// or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// Parses the opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
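// Illustrative usage sketch (hedged; not copied verbatim from the Clang
// sources): the RAII object is declared up front, and the scope is only
// entered once the scope specifier is known to be usable, e.g. roughly
//   DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
//   if (D.getCXXScopeSpec().isValid())
//     DeclScopeObj.EnterDeclaratorScope();
//   // ... parse the declarator-id ...; the destructor exits the scope.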
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
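// Note: these values are bit flags meant to be OR'd together when a caller
// accepts several attribute styles. For example (hypothetical call site),
//   ParseTypeQualifierListOpt(DS, AR_GNUAttributesParsed | AR_DeclspecAttributesParsed)
// is equivalent to passing AR_VendorAttributesParsed.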
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
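// Illustrative example (assumed OpenMP 5.x syntax, not taken from this file):
//   #pragma omp declare variant(foo_parallel) match(construct={parallel})
// The context selectors inside match(...) are what the parseOMPContext*
// helpers above consume.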
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check \p FoundKind against \p ExpectedKind; if they do not match, issue an
/// error that the "end" matching the "begin" directive of kind \p BeginKind was
/// not found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse the cast part of an OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
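// Illustrative example (assumed OpenMP 5.0 syntax):
//   depend(iterator(i = 0:n), in : a[i])
// Here 'iterator(i = 0:n)' is the <iterators> expression parsed by this
// function; 'i' ranges over the <range-specification> '0:n'.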
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
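// Illustrative example following the grammar above (allocator and trait names
// are placeholders; assumed OpenMP 5.0 clause syntax):
//   uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))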
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *DepModOrTailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc;
SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
MotionModifiers;
SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
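// Illustrative example matching the grammar above (assumed OpenMP syntax):
//   map(always, close, tofrom: a[0:N])
// where 'always' and 'close' are map-type-modifiers and 'tofrom' is the map-type.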
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
NamedDecl *
ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
class GNUAsmQualifiers {
unsigned Qualifiers = AQ_unspecified;
public:
enum AQ {
AQ_unspecified = 0,
AQ_volatile = 1,
AQ_inline = 2,
AQ_goto = 4,
};
static const char *getQualifierName(AQ Qualifier);
bool setAsmQualifier(AQ Qualifier);
inline bool isVolatile() const { return Qualifiers & AQ_volatile; };
inline bool isInline() const { return Qualifiers & AQ_inline; };
inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
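// Illustrative sketch (hypothetical, not from the Clang sources): the
// qualifiers accumulate as bit flags, so for 'asm volatile goto (...)' a
// caller would roughly do
//   GNUAsmQualifiers AQ;
//   AQ.setAsmQualifier(GNUAsmQualifiers::AQ_volatile);
//   AQ.setAsmQualifier(GNUAsmQualifiers::AQ_goto);
//   // AQ.isVolatile() and AQ.isGoto() now both return true.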
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
GiRaFFE_boundary_conditions.h |
// Currently, we're using basic Cartesian boundary conditions, pending fixes by Zach.
// Part P8a: Declare boundary condition FACE_UPDATE macro,
// which updates a single face of the 3D grid cube
// using polynomial extrapolation (currently the two-point, linear formula;
// the three-point quadratic term is commented out below).
// Basic extrapolation boundary conditions
#define FACE_UPDATE(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
gfs[IDX4(which_gf,i0,i1,i2)] = \
+2.0*gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-1.0*gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)]; \
}
// +1.0*gfs[IDX4(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
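// Illustrative sketch (not part of GiRaFFE; never compiled): where the
// extrapolation coefficients above come from. Fitting a polynomial through the
// neighboring interior points and evaluating it one grid point outside a face
// gives, on a uniform grid,
//   two-point (linear):      f0 ~ 2*f1 - 1*f2
//   three-point (quadratic): f0 ~ 3*f1 - 3*f2 + 1*f3   (the commented-out term).
#if 0 /* standalone check of the formulas, illustrative only */
#include <assert.h>
static double extrap2(double f1, double f2)            { return 2.0*f1 - f2; }
static double extrap3(double f1, double f2, double f3) { return 3.0*f1 - 3.0*f2 + f3; }
int main(void) {
  assert(extrap2(2.0, 3.0)       == 1.0); /* exact for linear data    f(x)=x:   f(2),f(3)      -> f(1) */
  assert(extrap3(4.0, 9.0, 16.0) == 1.0); /* exact for quadratic data f(x)=x^2: f(2),f(3),f(4) -> f(1) */
  return 0;
}
#endif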
// Basic Copy boundary conditions
#define FACE_UPDATE_COPY(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
gfs[IDX4(which_gf,i0,i1,i2)] = gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)]; \
}
// Part P8b: Boundary condition driver routine: Apply BCs to all six
// boundary faces of the cube, filling in the innermost
// ghost zone first, and moving outward.
const int MAXFACE = -1;
const int NUL = +0;
const int MINFACE = +1;
// This macro acts differently in that it acts on an entire 3-vector of gfs, instead of 1.
// which_gf_0 corresponds to the zeroth component of that vector. The if statements only
// evaluate true if the velocity is directed inwards on the face in consideration.
#define FACE_UPDATE_OUTFLOW(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
aux_gfs[IDX4(which_gf,i0,i1,i2)] = \
+2.0*aux_gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-1.0*aux_gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)]; \
}
/* aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] = \
+3.0*aux_gfs[IDX4(which_gf_0+1,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-3.0*aux_gfs[IDX4(which_gf_0+1,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
+1.0*aux_gfs[IDX4(which_gf_0+1,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] = \
+3.0*aux_gfs[IDX4(which_gf_0+2,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-3.0*aux_gfs[IDX4(which_gf_0+2,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
+1.0*aux_gfs[IDX4(which_gf_0+2,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
if(FACEX0*aux_gfs[IDX4(which_gf_0+0,i0,i1,i2)] > 0.0) { \
aux_gfs[IDX4(which_gf_0+0,i0,i1,i2)] = 0.0; \
} \
if(FACEX1*aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] > 0.0) { \
aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] = 0.0; \
} \
if(FACEX2*aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] > 0.0) { \
aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] = 0.0; \
} \
*/
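// Worked example of the ghost-zone sweep below (illustrative, assuming NGHOSTS=2
// and Nxx_plus_2NGHOSTS[0] = N+4): the first which_gz pass updates the innermost
// ghost layer, i0 = 1 and i0 = N+2; the imin[0]--/imax[0]++ adjustments then widen
// the face extents so the second pass fills i0 = 0 and i0 = N+3 using the layer
// just filled inside it. The same pattern applies in the i1 and i2 directions.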
void apply_bcs(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *gfs,REAL *aux_gfs) {
// First, we apply extrapolation boundary conditions to AD
#pragma omp parallel for
for(int which_gf=0;which_gf<NUM_EVOL_GFS;which_gf++) {
if(which_gf < STILDED0GF || which_gf > STILDED2GF) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
FACE_UPDATE(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
FACE_UPDATE(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
FACE_UPDATE(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
imin[2]--;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
imax[2]++;
}
}
}
// Apply outflow/extrapolation boundary conditions to ValenciavU by passing VALENCIAVU0 as which_gf_0
for(int which_gf=VALENCIAVU0GF;which_gf<=VALENCIAVU2GF;which_gf++) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
FACE_UPDATE_OUTFLOW(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
FACE_UPDATE_OUTFLOW(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
imin[2]--;
FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
imax[2]++;
}
}
// Then, we apply copy boundary conditions to StildeD and psi6Phi
/*#pragma omp parallel for
for(int which_gf=3;which_gf<NUM_EVOL_GFS;which_gf++) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
FACE_UPDATE_COPY(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
FACE_UPDATE_COPY(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
}
}*/
}
// A supplement to the boundary conditions for debugging. This will overwrite data with exact conditions.
void FACE_UPDATE_EXACT(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
const int n, const REAL dt,REAL *out_gfs,REAL *aux_gfs,
const int i0min,const int i0max, const int i1min,const int i1max, const int i2min,const int i2max,
const int FACEX0,const int FACEX1,const int FACEX2) {
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
REAL xx0 = xx[0][i0]-n*dt;
REAL xx1 = xx[1][i1];
REAL xx2 = xx[2][i2];
if(xx0<=lbound) {
#include "GiRaFFEfood_A_v_1D_tests_left.h"
}
else if (xx0<rbound) {
#include "GiRaFFEfood_A_v_1D_tests_center.h"
}
else {
#include "GiRaFFEfood_A_v_1D_tests_right.h"
}
out_gfs[IDX4(PSI6PHIGF, i0,i1,i2)] = 0.0;
}
}
void apply_bcs_EXACT(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
const int n, const REAL dt,
REAL *out_gfs,REAL *aux_gfs) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
// Right now, we only want to update the xmin and xmax faces with the exact data.
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL);
imin[0]--;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL);
imax[0]++;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL);
imin[1]--;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL);
imax[1]++;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
imin[2]--;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
imax[2]++;
}
}
// A supplement to the boundary conditions for debugging. This will overwrite data with exact conditions.
void FACE_UPDATE_EXACT_StildeD(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
REAL *out_gfs,REAL *out_gfs_exact,
const int i0min,const int i0max, const int i1min,const int i1max, const int i2min,const int i2max,
const int FACEX0,const int FACEX1,const int FACEX2) {
// This is currently modified to calculate more exact boundary conditions for StildeD. Rename if it works.
/*for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "GiRaFFEfood_HO_Stilde.h"
}*/
/*idx = IDX3(i0,i1,i2);
out_gfs[IDX4pt(STILDED0GF,idx)] = out_gfs_exact[IDX4pt(STILDED0GF,idx)];
out_gfs[IDX4pt(STILDED1GF,idx)] = out_gfs_exact[IDX4pt(STILDED1GF,idx)];
out_gfs[IDX4pt(STILDED2GF,idx)] = out_gfs_exact[IDX4pt(STILDED2GF,idx)];*/
}
void apply_bcs_EXACT_StildeD(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
REAL *out_gfs,REAL *out_gfs_exact) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
// Right now, we only want to update the xmin and xmax faces with the exact data.
FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL);
imin[0]--;
FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL);
imax[0]++;
//FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL);
imin[1]--;
//FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL);
imax[1]++;
//FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
imin[2]--;
//FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
imax[2]++;
}
} |
bins_dynamic_objects.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Nelson Lafontaine
// Carlos A. Roig
#if !defined(KRATOS_BINS_DYNAMIC_OBJECTS_CONTAINER_H_INCLUDED)
#define KRATOS_BINS_DYNAMIC_OBJECTS_CONTAINER_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <cmath>
#include <algorithm>
#include <array>
// Project includes
#include "tree.h"
#include "cell.h"
// Assumed Kratos header path providing IndexPartition, used by SearchObjectsInRadius below.
#include "utilities/parallel_utilities.h"
#ifdef _OPENMP
#include <omp.h>
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
template<class TConfigure>
class BinsObjectDynamic {
public:
///@name Type Definitions
///@{
enum { Dimension = TConfigure::Dimension };
typedef TConfigure Configure;
typedef typename TConfigure::PointType PointType;
typedef typename TConfigure::PointerType PointerType;
typedef typename TConfigure::ContainerType ContainerType;
typedef typename TConfigure::IteratorType IteratorType;
typedef typename TConfigure::ResultContainerType ResultContainerType;
typedef typename TConfigure::ResultIteratorType ResultIteratorType;
typedef typename TConfigure::DistanceIteratorType DistanceIteratorType;
typedef TreeNode<Dimension, PointType, PointerType, IteratorType, typename TConfigure::DistanceIteratorType> TreeNodeType;
typedef typename TreeNodeType::CoordinateType CoordinateType; // double
typedef typename TreeNodeType::SizeType SizeType; // std::size_t
typedef typename TreeNodeType::IndexType IndexType; // std::size_t
typedef Tvector<CoordinateType,Dimension> CoordinateArray;
typedef Tvector<SizeType,Dimension> SizeArray;
typedef Tvector<IndexType,Dimension> IndexArray;
///Contact Pair
typedef typename TConfigure::ContainerContactType ContainerContactType;
typedef typename TConfigure::IteratorContactType IteratorContactType;
///typedef TreeNodeType LeafType;
typedef typename TreeNodeType::IteratorIteratorType IteratorIteratorType;
typedef typename TreeNodeType::SearchStructureType SearchStructureType;
// Global Container
typedef Cell<Configure> CellType;
typedef std::vector<CellType> CellContainerType;
typedef typename CellContainerType::iterator CellContainerIterator;
/// Pointer definition of BinsObjectDynamic
KRATOS_CLASS_POINTER_DEFINITION(BinsObjectDynamic);
///@}
///@name Life Cycle
///@{
/// Default constructor.
BinsObjectDynamic() {}
/// Constructor of bins from a bounding box
/**
* @brief Constructs a new BinsObjectDynamic
*
* Construct a new BinsObjectDynamic using a list of objects and an automatically calculated cell size.
*
* @param ObjectsBegin Iterator to the first object of the bins
* @param ObjectsEnd Iterator to the last object of the bins
*/
BinsObjectDynamic (IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd)
: mObjectsBegin(ObjectsBegin), mObjectsEnd(ObjectsEnd) {
mObjectsSize = SearchUtils::PointerDistance(mObjectsBegin,mObjectsEnd);
CalculateBoundingBox(); // Calculate mMinPoint, mMaxPoint
CalculateCellSize(mObjectsSize); // Calculate number of Cells
AllocateContainer(); // Allocate cell list
GenerateBins(); // Fill Cells with objects
}
/**
* @brief Constructs a new BinsObjectDynamic
*
* Constructs a new BinsObjectDynamic using a list of objects and a user-provided cell size.
*
* @param ObjectsBegin Iterator to the first object of the bins
* @param ObjectsEnd Iterator to the last object of the bins
* @param CellSize Size of the cells (equal for every dimension)
*/
BinsObjectDynamic (IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd, CoordinateType CellSize)
: mObjectsBegin(ObjectsBegin), mObjectsEnd(ObjectsEnd) {
mObjectsSize = SearchUtils::PointerDistance(mObjectsBegin,mObjectsEnd);
CalculateBoundingBox(); // Calculate mMinPoint, mMaxPoint
AssignCellSize(CellSize); // Calculate number of Cells
AllocateContainer(); // Allocate cell list
GenerateBins(); // Fill Cells with objects
}
/**
* @brief Constructs a new BinsObjectDynamic
*
* Constructs a new BinsObjectDynamic using a given bounding box and a user-provided cell size.
*
* @param MinPoint Min point of the boundingbox containing the bins
* @param MaxPoint Max point of the boundingbox containing the bins
* @param CellSize Size of the cells (equal for every dimension)
*/
BinsObjectDynamic (const PointType& MinPoint, const PointType& MaxPoint, CoordinateType CellSize)
: mObjectsSize(0), mObjectsBegin(0), mObjectsEnd(0) {
for(SizeType i = 0; i < Dimension; i++) {
mMinPoint[i] = MinPoint[i];
mMaxPoint[i] = MaxPoint[i];
}
AssignCellSize(CellSize); // Calculate number of Cells
AllocateContainer(); // Allocate cell list
}
/**
* @brief Constructs a new BinsObjectDynamic object
*
* Constructs a new BinsObjectDynamic using a given bounding box and a provided approximation of the number
* of objects that will be added to the bins.
*
* @param MinPoint Min point of the boundingbox containing the bins
* @param MaxPoint Max point of the boundingbox containing the bins
* @param NumPoints Expected number of elements in the bins
*/
BinsObjectDynamic (const PointType& MinPoint, const PointType& MaxPoint, SizeType NumPoints)
: mObjectsSize(0), mObjectsBegin(0), mObjectsEnd(0) {
for(SizeType i = 0; i < Dimension; i++) {
mMinPoint[i] = MinPoint[i];
mMaxPoint[i] = MaxPoint[i];
}
CalculateCellSize(NumPoints); // Calculate number of Cells
AllocateContainer(); // Allocate cell list
}
/// Destructor.
virtual ~BinsObjectDynamic() {}
/// Single search API
/**
* [SearchObjects description]
* @param ThisObject [description]
* @param Result [description]
* @return [description]
*/
SizeType SearchObjects(PointerType& ThisObject, ResultContainerType& Result) {
PointType Low, High;
SearchStructureType Box;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInBoxLocal(ThisObject, Result, Box );
return Result.size();
}
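// Illustrative usage sketch (hypothetical names; TConfigure must supply the
// usual Kratos spatial-search typedefs and CalculateBoundingBox):
//   BinsObjectDynamic<MyConfigure> bins(objects.begin(), objects.end());
//   ResultContainerType results;
//   bins.SearchObjects(objects[0], results);   // results now holds the candidate neighbours
// or, with a preallocated buffer and the iterator overload below:
//   std::vector<PointerType> buffer(max_results);
//   ResultIteratorType it = buffer.begin();
//   SizeType n = bins.SearchObjects(objects[0], it, max_results);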
/**
* [SearchObjects description]
* @param ThisObject [description]
* @param Result [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjects(PointerType& ThisObject, ResultIteratorType& Result, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInBoxLocal(ThisObject, Result, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInCell description]
* @param ThisPoint [description]
* @param Result [description]
* @return [description]
*/
SizeType SearchObjectsInCell(const PointType& ThisPoint, ResultIteratorType Result) {
/// Missing API for 'SearchObjectsInCell' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInCell(PointerType, ResultIteratorType)" << std::endl;
}
/**
* [SearchObjectsInCell description]
* @param ThisPoint [description]
* @param Result [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjectsInCell(const PointType& ThisPoint, ResultIteratorType Result, const SizeType& MaxNumberOfResults) {
IndexType icell = CalculateIndex(ThisPoint);
if(mCells[icell].Size() < MaxNumberOfResults) {
for(IteratorType i_object = mCells[icell].Begin() ; i_object != mCells[icell].End(); i_object++, Result++) {
*Result = *i_object;
}
return mCells[icell].Size();
} else {
return std::numeric_limits<SizeType>::max();
}
}
/**
* [SearchObjectsExclusive description]
* @param ThisObject [description]
* @param Result [description]
* @return [description]
*/
SizeType SearchObjectsExclusive(PointerType& ThisObject, ResultIteratorType& Result) {
PointType Low, High;
SearchStructureType Box;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchObjectLocalExclusive(ThisObject, Result, Box );
return Result.size();
}
/**
* [SearchObjectsExclusive description]
* @param ThisObject [description]
* @param Result [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjectsExclusive(PointerType& ThisObject, ResultIteratorType& Result, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchObjectLocalExclusive(ThisObject, Result, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInRadius description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @return [description]
*/
SizeType SearchObjectsInRadius(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results) {
/// Missing API for 'SearchObjectsInRadius' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInRadius(PointerType, const double, ResultIteratorType)" << std::endl;
}
/**
* [SearchObjectsInRadius description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjectsInRadius(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High, Radius);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadius(ThisObject, Radius, Results, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInRadius description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param ResultDistances [description]
* @return [description]
*/
SizeType SearchObjectsInRadius(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, DistanceIteratorType ResultDistances) {
/// Missing API for 'SearchObjectsInRadius' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInRadius(PointerType, const double, ResultIteratorType, DistanceIteratorType)" << std::endl;
}
/**
* [SearchObjectsInRadius description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param ResultDistances [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
SizeType SearchObjectsInRadius(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, DistanceIteratorType ResultDistances, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High, Radius);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadius(ThisObject, Radius, Results, ResultDistances, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @return [description]
*/
virtual SizeType SearchObjectsInRadiusExclusive(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results) {
/// Missing API for 'SearchObjectsInRadiusExclusive' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInRadiusExclusive(PointerType, const double, ResultIteratorType)" << std::endl;
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
virtual SizeType SearchObjectsInRadiusExclusive(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High, Radius);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadiusExclusive(ThisObject, Radius, Results, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param ResultDistances [description]
* @return [description]
*/
virtual SizeType SearchObjectsInRadiusExclusive(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, DistanceIteratorType ResultDistances) {
/// Missing API for 'SearchObjectsInRadiusExclusive' without 'MaxNumberOfResults'
KRATOS_ERROR << "Missing implementation of SearchObjectsInRadiusExclusive(PointerType, const double, ResultIteratorType, DistanceIteratorType)" << std::endl;
}
/**
* [SearchObjectsInRadiusExclusive description]
* @param ThisObject [description]
* @param Radius [description]
* @param Results [description]
* @param ResultDistances [description]
* @param MaxNumberOfResults [description]
* @return [description]
*/
virtual SizeType SearchObjectsInRadiusExclusive(PointerType& ThisObject, const double& Radius, ResultIteratorType& Results, DistanceIteratorType ResultDistances, const SizeType& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
SizeType NumberOfResults = 0;
TConfigure::CalculateBoundingBox(ThisObject, Low, High, Radius);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadiusExclusive(ThisObject, Radius, Results, ResultDistances, NumberOfResults, MaxNumberOfResults, Box );
return NumberOfResults;
}
/// Batch search API (needs to be extended with the missing functions)
/**
* [SearchObjectsInRadius description]
* @param ThisObjects [description]
* @param NumberOfObjects [description]
* @param Radius [description]
* @param Results [description]
* @param NumberOfResults [description]
* @param MaxNumberOfResults [description]
*/
void SearchObjectsInRadius(IteratorType const& ThisObjects, SizeType const& NumberOfObjects, const std::vector<double>& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) {
struct tls_type
{
PointType Low;
PointType High;
SearchStructureType Box;
};
IndexPartition<std::size_t>(NumberOfObjects).for_each(tls_type(), [&](std::size_t i, tls_type& rTLS){
ResultIteratorType ResultsPointer = Results[i].begin();
NumberOfResults[i] = 0;
TConfigure::CalculateBoundingBox(ThisObjects[i], rTLS.Low, rTLS.High, Radius[i]);
rTLS.Box.Set( CalculateCell(rTLS.Low), CalculateCell(rTLS.High), mN );
SearchInRadius(ThisObjects[i], Radius[i], ResultsPointer, NumberOfResults[i], MaxNumberOfResults, rTLS.Box );
});
}
/**
* [SearchObjectsInRadius description]
* @param ThisObjects [description]
* @param NumberOfObjects [description]
* @param Radius [description]
* @param Results [description]
* @param ResultsDistances [description]
* @param NumberOfResults [description]
* @param MaxNumberOfResults [description]
*/
void SearchObjectsInRadius(IteratorType const& ThisObjects, SizeType const& NumberOfObjects, const std::vector<double>& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<std::vector<double> >& ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
#pragma omp parallel for private(Low,High,Box)
for(int i = 0; i < static_cast<int>(NumberOfObjects); i++) {
ResultIteratorType ResultsPointer = Results[i].begin();
DistanceIteratorType ResultsDistancesPointer = ResultsDistances[i].begin();
NumberOfResults[i] = 0;
TConfigure::CalculateBoundingBox(ThisObjects[i], Low, High, Radius[i]);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadius(ThisObjects[i], Radius[i], ResultsPointer, ResultsDistancesPointer, NumberOfResults[i], MaxNumberOfResults, Box );
}
}
/**
* Searches, for each object in 'ThisObjects', all objects lying within the corresponding search radius, excluding the object itself.
* @param ThisObjects Iterator to the first object to be searched
* @param NumberOfObjects Number of objects to be searched
* @param Radius Search radius of each object
* @param Results For each object, the container where the found objects are stored
* @param NumberOfResults For each object, the number of objects found
* @param MaxNumberOfResults Maximum number of results to store per object
*/
virtual void SearchObjectsInRadiusExclusive(IteratorType const& ThisObjects, SizeType const& NumberOfObjects, const std::vector<double>& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
#pragma omp parallel for private(Low,High,Box)
for(int i = 0; i < static_cast<int>(NumberOfObjects); i++) {
ResultIteratorType ResultsPointer = Results[i].begin();
NumberOfResults[i] = 0;
TConfigure::CalculateBoundingBox(ThisObjects[i], Low, High, Radius[i]);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadiusExclusive(ThisObjects[i], Radius[i], ResultsPointer, NumberOfResults[i], MaxNumberOfResults, Box );
}
}
/**
* Searches, for each object in 'ThisObjects', all objects lying within the corresponding search radius, excluding the object itself and also returning the distances.
* @param ThisObjects Iterator to the first object to be searched
* @param NumberOfObjects Number of objects to be searched
* @param Radius Search radius of each object
* @param Results For each object, the container where the found objects are stored
* @param ResultsDistances For each object, the distances to the found objects
* @param NumberOfResults For each object, the number of objects found
* @param MaxNumberOfResults Maximum number of results to store per object
*/
virtual void SearchObjectsInRadiusExclusive(IteratorType const& ThisObjects, SizeType const& NumberOfObjects, const std::vector<double>& Radius, std::vector<std::vector<PointerType> >& Results, std::vector<std::vector<double> >& ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults) {
PointType Low, High;
SearchStructureType Box;
#pragma omp parallel for private(Low,High,Box)
for(int i = 0; i < static_cast<int>(NumberOfObjects); i++) {
ResultIteratorType ResultsPointer = Results[i].begin();
DistanceIteratorType ResultsDistancesPointer = ResultsDistances[i].begin();
NumberOfResults[i] = 0;
TConfigure::CalculateBoundingBox(ThisObjects[i], Low, High, Radius[i]);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
SearchInRadiusExclusive(ThisObjects[i], Radius[i], ResultsPointer, ResultsDistancesPointer, NumberOfResults[i], MaxNumberOfResults, Box );
}
}
/// Contact search API
/**
* Performs a cell-by-cell contact search over the whole bins, appending the detected contact pairs to 'Result'.
* NOTE[Charlie]: Why this function does not return the number of results like the others?
* @param Result Container where the detected contact pairs are stored
*/
void SearchContact(ContainerContactType& Result) {
for (CellContainerIterator icell = mCells.begin() ; icell!= mCells.end(); icell++) {
icell->SearchContact(Result);
}
}
/**
* Performs a cell-by-cell contact search over the whole bins, storing at most 'MaxNumberOfResults' contact pairs through 'Result'.
* @param Result Iterator where the detected contact pairs are stored
* @param MaxNumberOfResults Maximum number of results to store
* @return Number of contact pairs found
*/
SizeType SearchContact(IteratorContactType& Result, const SizeType& MaxNumberOfResults ) {
SizeType NumberOfResults = 0;
for (CellContainerIterator icell = mCells.begin() ; icell!= mCells.end(); icell++) {
icell->SearchContact(Result, NumberOfResults, MaxNumberOfResults);
}
return NumberOfResults;
}
/// Add/Remove
/**
* Adds an object to the bins, inserting it in every cell intersected by its bounding box.
* @param ThisObject Object to be added
*/
virtual void AddObject(const PointerType& ThisObject) {
PointType Low, High;
SearchStructureType Box;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
FillObject(Box,ThisObject);
mObjectsSize++;
}
/**
* Removes an object from the bins, erasing it from every cell intersected by its bounding box.
* @param ThisObject Object to be removed
*/
void RemoveObject(const PointerType& ThisObject) {
PointType Low, High;
SearchStructureType Box;
TConfigure::CalculateBoundingBox(ThisObject, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
RemoveObjectLocal(Box,ThisObject);
mObjectsSize--;
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/** Calculates the IndexArray (x[,y[,z]]) of the provided object.
* Calculates the IndexArray (x[,y[,z]]) of the provided object.
* The provided object must provide its coordinates through the [] operator.
* @param ThisObject Input Object
* @return Cell coordinates of 'ThisObject' in the bins
*/
template<class GenericCoordType>
IndexArray CalculateCell(const GenericCoordType& ThisObject) {
IndexArray IndexCell;
for(SizeType i = 0 ; i < Dimension ; i++) {
IndexCell[i] = CalculatePosition(ThisObject[i],i);
}
return IndexCell;
}
/** Calculates the Index of the provided object.
* Calculates the Index of the provided object.
* The provided object must provide its coordinates through the [] operator.
* @param ThisObject Input Object
* @return Cell index of 'ThisObject' in the bins
*/
template<class GenericCoordType>
IndexType CalculateIndex(const GenericCoordType& ThisObject) {
IndexType Index = 0;
for(SizeType iDim = Dimension-1 ; iDim > 0 ; iDim--) {
Index += CalculatePosition(ThisObject[iDim],iDim);
Index *= mN[iDim-1];
}
Index += CalculatePosition(ThisObject[0],0);
return Index;
}
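// For Dimension = 3 the loop above unrolls to the usual row-major linearization,
// i.e. Index = pos0 + mN[0] * (pos1 + mN[1] * pos2), where pos_i = CalculatePosition(ThisObject[i], i).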
/**
* Computes the cell index along 'ThisDimension' corresponding to coordinate 'ThisCoord', clamped to the valid range [0, mN[ThisDimension]-1].
* @param ThisCoord Coordinate along the given dimension
* @param ThisDimension Index of the dimension
* @return Cell index along the given dimension
*/
virtual IndexType CalculatePosition(CoordinateType const& ThisCoord, const SizeType& ThisDimension) {
CoordinateType d_index = (ThisCoord - mMinPoint[ThisDimension]) * mInvCellSize[ThisDimension];
IndexType index = static_cast<IndexType>( (d_index < 0.00) ? 0.00 : d_index );
return (index > mN[ThisDimension]-1) ? mN[ThisDimension]-1 : index;
}
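// Illustrative example: with mMinPoint[d] = 0, mInvCellSize[d] = 0.5 and mN[d] = 10,
// a coordinate of 7.3 maps to cell 3; coordinates below the minimum clamp to 0 and
// coordinates beyond the last cell clamp to mN[d]-1 = 9.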
///@}
///@name Access
///@{
/**
* @brief Get the Cell Container object
*
* @return CellContainerType& The Cell Container object
*/
CellContainerType& GetCellContainer() {
return mCells;
}
/**
* @brief Get the Divisions object
*
* @return SizeArray& Array containing the number of Cells in each dimension
*/
SizeArray& GetDivisions() {
return mN;
}
/**
* @brief Get the Cell Size object
*
* @return CoordinateArray& Array containing the size of the Cell in each dimension
*/
CoordinateArray& GetCellSize() {
return mCellSize;
}
/**
* @brief Get the Min Point object
*
* @return PointType& Min point of the bins
*/
PointType& GetMinPoint() {
return mMinPoint;
}
/**
* @brief Get the Max Point object
*
* @return PointType& Max point of the bins
*/
PointType& GetMaxPoint() {
return mMaxPoint;
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const {
return "BinsObjectDynamic" ;
}
/** Print information about this object.
* Print information about this object.
* @param rOStream Output stream
*/
virtual void PrintInfo(std::ostream& rOStream) const {
rOStream << Info();
}
/** Print object's data.
* Print object's data.
* @param rOStream Output stream
* @param Prefix Prefix string prepended to the output (currently unused)
*/
virtual void PrintData(std::ostream& rOStream, std::string const& Prefix = std::string()) const {
rOStream << " BinsSize: ";
for(SizeType i = 0 ; i < Dimension ; i++) {
rOStream << "[" << mN[i] << "]";
}
rOStream << std::endl;
rOStream << " CellSize: ";
for(SizeType i = 0 ; i < Dimension ; i++) {
rOStream << "[" << mCellSize[i] << "]";
}
rOStream << std::endl;
SizeType nn = 0;
for(SizeType i = 0 ; i < mCells.size(); i++) {
nn += mCells[i].Size();
}
rOStream << "NumPointers: " << nn << std::endl;
}
/** Print Size of Container
* Print Size of Container
* @param rout Output stream
*/
void PrintSize(std::ostream& rout) {
rout << " BinsSize: ";
for(SizeType i = 0 ; i < Dimension ; i++) {
rout << "[" << mN[i] << "]";
}
rout << std::endl;
}
/** Print Limits Points of the Container
* Print Limits Points of the Container
* @param rout Output stream
*/
void PrintBox(std::ostream& rout) {
rout << " BinsBox: Min [";
mMinPoint.Print(rout);
rout << "]; Max [";
mMaxPoint.Print(rout);
rout << "]; Size [";
mCellSize.Print(rout);
rout << "]" << std::endl;
}
protected:
///@}
///@name Friends
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
/// It computes each object's bounding box and uses it to find the max and min points
virtual void CalculateBoundingBox()
{
PointType Low, High;
TConfigure::CalculateBoundingBox(*mObjectsBegin,mMinPoint,mMaxPoint);
#ifdef _OPENMP
SizeType number_of_threads = omp_get_max_threads();
#else
SizeType number_of_threads = 1;
#endif
std::vector<SizeType> node_partition;
CreatePartition(number_of_threads, mObjectsSize, node_partition);
std::vector<PointType> Max(number_of_threads);
std::vector<PointType> Min(number_of_threads);
for(SizeType k=0; k<number_of_threads; k++ )
{
Max[k] = mMaxPoint;
Min[k] = mMinPoint;
}
IteratorType i_begin = mObjectsBegin;
IteratorType i_end = mObjectsEnd;
for (IteratorType i_object = i_begin ; i_object != i_end ; i_object++ )
{
TConfigure::CalculateBoundingBox(*i_object, Low, High);
for(SizeType i = 0 ; i < Dimension ; i++)
{
mMaxPoint[i] = (mMaxPoint[i] < High[i]) ? High[i] : mMaxPoint[i];
mMinPoint[i] = (mMinPoint[i] > Low[i]) ? Low[i] : mMinPoint[i];
}
}
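// Inflate the bounding box by 1% of its extent in each dimension, leaving a margin around the outermost objects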
auto Epsilon = PointType{mMaxPoint - mMinPoint};
for(SizeType i = 0 ; i < Dimension ; i++)
{
mMaxPoint[i] += Epsilon[i] * 0.01;
mMinPoint[i] -= Epsilon[i] * 0.01;
}
}
/**
* @brief Calculates the cell size of the bins.
*
* Calculates the cell size of the bins using an average approximation of the objects in the bins.
*
* @param ApproximatedSize Approximate number of objects that will be stored in the bins
*/
void CalculateCellSize(std::size_t ApproximatedSize)
{
std::size_t average_number_of_cells = static_cast<std::size_t>(std::pow(static_cast<double>(ApproximatedSize), 1.00 / Dimension));
std::array<double, 3> lengths;
double average_length = 0.00;
for (int i = 0; i < Dimension; i++) {
lengths[i] = mMaxPoint[i] - mMinPoint[i];
average_length += lengths[i];
}
average_length *= 1.00 / 3.00;
if (average_length < std::numeric_limits<double>::epsilon()) {
for(int i = 0; i < Dimension; i++) {
mN[i] = 1;
}
return;
}
for (int i = 0; i < Dimension; i++) {
mN[i] = static_cast<std::size_t>(lengths[i] / average_length * (double)average_number_of_cells) + 1;
if (mN[i] > 1) {
mCellSize[i] = lengths[i] / mN[i];
} else {
mCellSize[i] = average_length;
}
mInvCellSize[i] = 1.00 / mCellSize[i];
}
}
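// Worked example (illustrative): with ApproximatedSize = 1000 and Dimension = 3 the
// target is roughly 10 cells per direction; for box lengths {2, 4, 6} the average
// length is 4, so mN becomes roughly {6, 11, 16} and mCellSize[i] = lengths[i] / mN[i].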
/**
* @brief Assigns the cell size of the bins using the provided CellSize.
*
* Assigns the cell size of the bins using the provided CellSize.
*
* @param CellSize Desired size of the cells.
*/
void AssignCellSize(CoordinateType CellSize)
{
for(SizeType i = 0 ; i < Dimension ; i++)
{
mCellSize[i] = CellSize;
mInvCellSize[i] = 1.00 / mCellSize[i];
mN[i] = static_cast<SizeType>( (mMaxPoint[i]-mMinPoint[i]) / mCellSize[i]) + 1;
}
}
virtual void GenerateBins()
{
PointType Low, High;
SearchStructureType Box;
/// Fill container with objects
for(IteratorType i_object = mObjectsBegin ; i_object != mObjectsEnd ; i_object++)
{
TConfigure::CalculateBoundingBox(*i_object, Low, High);
Box.Set( CalculateCell(Low), CalculateCell(High), mN );
FillObject(Box, *i_object);
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchInBoxLocal(PointerType& ThisObject, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjects(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
void SearchInBoxLocal(PointerType& ThisObject, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjects(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
void SearchInBoxLocal(PointerType& ThisObject, ResultIteratorType& Result,
SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
{
mCells[I].SearchObjects(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchInBoxLocal(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjects(ThisObject, Result);
}
}
// Dimension = 2
void SearchInBoxLocal(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjects(ThisObject, Result);
}
}
}
// Dimension = 3
void SearchInBoxLocal(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
{
mCells[I].SearchObjects(ThisObject, Result);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjectsExclusive(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjectsExclusive(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultIteratorType& Result,
SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
{
mCells[I].SearchObjectsExclusive(ThisObject, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block )
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjectsExclusive(ThisObject, Result);
}
// Dimension = 2
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
mCells[I].SearchObjectsExclusive(ThisObject, Result);
}
}
}
// Dimension = 3
void SearchObjectLocalExclusive(PointerType& ThisObject, ResultContainerType& Result,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell))
{
mCells[I].SearchObjectsExclusive(ThisObject, Result);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRaius(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRaius(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
{
mCells[I].SearchObjectsInRadius(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRaius(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRaius(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
void SearchInRadius(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
{
mCells[I].SearchObjectsInRadius(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
{
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// **** THREAD SAFE
// Dimension = 1
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0])
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
// Dimension = 2
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
}
}
// Dimension = 3
virtual void SearchInRadiusExclusive(PointerType& ThisObject, CoordinateType const& Radius, ResultIteratorType& Result, DistanceIteratorType ResultDistances, SizeType& NumberOfResults, const SizeType& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2] += mCellSize[2], MaxCell[2] += mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1] += mCellSize[1], MaxCell[1] += mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0] += mCellSize[0], MaxCell[0] += mCellSize[0] )
{
if(TConfigure::IntersectionBox(ThisObject, MinCell, MaxCell, Radius))
{
mCells[I].SearchObjectsInRadiusExclusive(ThisObject, Radius, Result, ResultDistances, NumberOfResults, MaxNumberOfResults);
}
}
}
}
}
// Dimension = 1
void FillObject( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object, MinCell, MaxCell))
mCells[I].Add(i_object);
}
}
// Dimension = 2
void FillObject( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1]+=mCellSize[1], MaxCell[1]+=mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object,MinCell,MaxCell))
mCells[I].Add(i_object);
}
}
}
// Dimension = 3
virtual void FillObject( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2]+=mCellSize[2], MaxCell[2]+=mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1]+=mCellSize[1], MaxCell[1]+=mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object,MinCell,MaxCell))
mCells[I].Add(i_object);
}
}
}
}
// Dimension = 1
void RemoveObjectLocal( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
MinCell[0] = static_cast<CoordinateType>(Box.Axis[0].Min) * mCellSize[0] + mMinPoint[0]; //
MaxCell[0] = MinCell[0] + mCellSize[0];
for(IndexType I = Box.Axis[0].Begin() ; I <= Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object, MinCell, MaxCell))
mCells[I].Remove(i_object);
}
}
// Dimension = 2
void RemoveObjectLocal( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 2; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i];
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = Box.Axis[1].Begin() ; II <= Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1]+=mCellSize[1], MaxCell[1]+=mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object,MinCell,MaxCell))
mCells[I].Remove(i_object);
}
}
}
// Dimension = 3
void RemoveObjectLocal( SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box, const PointerType& i_object)
{
PointType MinCell, MaxCell;
PointType MinBox, MaxBox;
for(SizeType i = 0; i < 3; i++)
{
MinBox[i] = static_cast<CoordinateType>(Box.Axis[i].Min) * mCellSize[i] + mMinPoint[i]; //
MaxBox[i] = MinBox[i] + mCellSize[i];
}
MinCell[2] = MinBox[2];
MaxCell[2] = MaxBox[2];
for(IndexType III = Box.Axis[2].Begin() ; III <= Box.Axis[2].End() ; III += Box.Axis[2].Block, MinCell[2]+=mCellSize[2], MaxCell[2]+=mCellSize[2] )
{
MinCell[1] = MinBox[1];
MaxCell[1] = MaxBox[1];
for(IndexType II = III + Box.Axis[1].Begin() ; II <= III + Box.Axis[1].End() ; II += Box.Axis[1].Block, MinCell[1]+=mCellSize[1], MaxCell[1]+=mCellSize[1] )
{
MinCell[0] = MinBox[0];
MaxCell[0] = MaxBox[0];
for(IndexType I = II + Box.Axis[0].Begin() ; I <= II + Box.Axis[0].End() ; I += Box.Axis[0].Block, MinCell[0]+=mCellSize[0], MaxCell[0]+=mCellSize[0] )
{
if(TConfigure::IntersectionBox(i_object,MinCell,MaxCell))
mCells[I].Remove(i_object);
}
}
}
}
void AllocateContainer()
{
SizeType Size = mN[0];
for(SizeType i = 1 ; i < Dimension ; i++)
Size *= mN[i];
mCells.resize(Size);
}
inline void CreatePartition(SizeType number_of_threads, const SizeType number_of_rows, std::vector<SizeType>& partitions)
{
partitions.resize(number_of_threads+1);
SizeType partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(SizeType i = 1; i<number_of_threads; i++)
partitions[i] = partitions[i-1] + partition_size ;
}
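// For example, number_of_threads = 3 and number_of_rows = 10 gives partitions = {0, 3, 6, 10};
// the last thread absorbs the remainder of the integer division.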
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
PointType mMinPoint;
PointType mMaxPoint;
SizeType mObjectsSize;
IteratorType mObjectsBegin;
IteratorType mObjectsEnd;
CoordinateArray mCellSize;
CoordinateArray mInvCellSize;
SizeArray mN;
CellContainerType mCells; ///The bin
private:
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
public:
/// Assignment operator.
BinsObjectDynamic<TConfigure> & operator=(const BinsObjectDynamic<TConfigure> & rOther)
{
mMinPoint = rOther.mMinPoint;
mMaxPoint = rOther.mMaxPoint;
mObjectsBegin = rOther.mObjectsBegin;
mObjectsEnd = rOther.mObjectsEnd;
mObjectsSize = rOther.mObjectsSize;
mCellSize = rOther.mCellSize;
mInvCellSize = rOther.mInvCellSize;
mN = rOther.mN;
mCells = rOther.mCells;
return *this;
}
/// Copy constructor.
BinsObjectDynamic(const BinsObjectDynamic& rOther)
{
*this = rOther;
}
/// Copy constructor.
template<class T>
BinsObjectDynamic(const BinsObjectDynamic<T>& rOther)
{
*this = rOther;
}
}; // Class BinsObjectDynamic
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TConfigure>
inline std::istream& operator >> (std::istream& rIStream,
BinsObjectDynamic<TConfigure>& rThis)
{
return rIStream;
}
/// output stream function
template<class TConfigure>
inline std::ostream& operator << (std::ostream& rOStream,
const BinsObjectDynamic<TConfigure> & rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_FILENAME_H_INCLUDED defined
|
GB_unop__cosh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__cosh_fp64_fp64
// op(A') function: GB_unop_tran__cosh_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = cosh (aij)
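// Illustrative note (not part of the generated code): this kernel is the
// specialization reached when a cosh unary operator is applied to an FP64
// matrix, e.g. roughly
//
//      // C = cosh (A), with A and C both GrB_FP64
//      GrB_Matrix_apply (C, NULL, NULL, GxB_COSH_FP64, A, NULL) ;
//
// (the operator name above is assumed from the GxB_* math extensions).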
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cosh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = cosh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__cosh_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = cosh (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__cosh_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
core_slansy.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlansy.c, normal z -> s, Fri Sep 28 17:38:21 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
__attribute__((weak))
void plasma_core_slansy(plasma_enum_t norm, plasma_enum_t uplo,
int n,
const float *A, int lda,
float *work, float *value)
{
*value = LAPACKE_slansy_work(LAPACK_COL_MAJOR,
lapack_const(norm),
lapack_const(uplo),
n, A, lda, work);
}
/******************************************************************************/
void plasma_core_omp_slansy(plasma_enum_t norm, plasma_enum_t uplo,
int n,
const float *A, int lda,
float *work, float *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:value[0:1])
{
if (sequence->status == PlasmaSuccess)
plasma_core_slansy(norm, uplo, n, A, lda, work, value);
}
}
/******************************************************************************/
void plasma_core_omp_slansy_aux(plasma_enum_t norm, plasma_enum_t uplo,
int n,
const float *A, int lda,
float *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
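// For the one- and infinity-norms of a symmetric matrix the column and row
// absolute sums coincide, so only the stored triangle is traversed and each
// off-diagonal entry contributes to two partial sums in 'value'.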
switch (norm) {
case PlasmaOneNorm:
case PlasmaInfNorm:
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:value[0:n])
{
if (sequence->status == PlasmaSuccess) {
if (uplo == PlasmaUpper) {
for (int i = 0; i < n; i++)
value[i] = 0.0;
for (int j = 0; j < n; j++) {
for (int i = 0; i < j; i++) {
value[i] += fabsf(A[lda*j+i]);
value[j] += fabsf(A[lda*j+i]);
}
value[j] += fabsf(A[lda*j+j]);
}
}
else { // PlasmaLower
for (int i = 0; i < n; i++)
value[i] = 0.0;
for (int j = 0; j < n; j++) {
value[j] += fabsf(A[lda*j+j]);
for (int i = j+1; i < n; i++) {
value[i] += fabsf(A[lda*j+i]);
value[j] += fabsf(A[lda*j+i]);
}
}
}
}
}
break;
}
}
|
GB_unop__identity_int32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_uint8)
// op(A') function: GB (_unop_tran__identity_int32_uint8)
// C type: int32_t
// A type: uint8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
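// Illustrative note (not part of the generated code): with no arithmetic in the
// operator, this kernel reduces to a uint8_t -> int32_t typecast of every entry,
// e.g. roughly GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_INT32, A, NULL)
// applied to a GrB_UINT8 matrix A with a GrB_INT32 output C.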
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int32_uint8)
(
int32_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int32_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_builder.c | //------------------------------------------------------------------------------
// GB_builder: build a matrix from tuples
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLED BY: GB_build, GB_wait, GB_transpose, GB_concat_hyper
// This function is called by GB_build to build a matrix T for GrB_Matrix_build
// or GrB_Vector_build, by GB_wait to build a matrix T from the list of pending
// tuples, and by GB_transpose to transpose a matrix or vector. Duplicates can
// appear if called by GB_build or GB_wait, but not GB_transpose.
// The indices are provided either as (I_input,J_input) or (I_work,J_work), not
// both. The values are provided as S_input or S_work, not both. On return,
// the *work arrays are either transplanted into T, or freed, since they are
// temporary workspaces.
// The work is done in 5 major Steps, some of which can be skipped, depending
// on how the tuples are provided (*_work or *_input), and whether or not they
// are sorted, or have duplicates. If vdim <= 1, some work is skipped (for
// GrB_Vectors, and single-vector GrB_Matrices). Let e be the # of tuples on
// input. Let p be the # of threads used.
// STEP 1: copy user input. O(e/p) read/write per thread, or skipped.
// STEP 2: sort the tuples. Time: O((e log e)/p), read/write, or skipped if
// the tuples are already sorted.
// STEP 3: count vectors and duplicates. O(e/p) reads, per thread, if no
// duplicates, or skipped if already done. O(e/p) read/writes
// per thread if duplicates appear.
// STEP 4: construct T->h and T->p. O(e/p) reads per thread, or skipped if
// T is a vector.
// STEP 5: assemble the tuples. O(e/p) read/writes per thread, or O(1) if the
// values can be transplanted into T as-is.
// For GrB_Matrix_build: If the input (I_input, J_input, S_input) is already
// sorted with no duplicates, and no typecasting needs to be done, then Step 1
// still must be done (each thread does O(e/p) reads of (I_input,J_input) and
// writes to I_work), but Step 1 also does the work for Step 3. Step 2 and 3
// are skipped. Step 4 does O(e/p) reads per thread (J_input only). Then
// I_work is transplanted into T->i. Step 5 does O(e/p) read/writes per thread
// to copy Sx into T->x.
// For GrB_Vector_build: as GrB_Matrix_build, Step 1 does O(e/p) read/writes
// per thread. The input is always a vector, so vdim == 1 always holds. Step
// 2 is skipped if the indices are already sorted, and Step 3 does no work at
// all unless duplicates appear. Step 4 takes no time, for any vector. Step 5
// does O(e/p) reads/writes per thread.
// For GB_wait: the pending tuples are provided as I_work, J_work, and S_work,
// so Step 1 is skipped (no need to check for invalid indices). The input
// J_work may be null (vdim can be anything, since GB_wait is used for both
// vectors and matrices). The tuples might be in sorted order already, which
// is precisely known from A->Pending->sorted. Step 2 does
// O((e log e)/p) work to sort the tuples. Duplicates may appear, and
// out-of-order tuples are likely. Step 3 does O(e/p) read/writes. Step 4
// does O(e/p) reads per thread of (I_work,J_work), or just I_work. Step 5
// does O(e/p) read/writes per thread, or O(1) time if S_work can be
// transplanted into T->x.
// For GB_transpose: uses I_work, J_work, and either S_input (if no op applied
// to the values) or S_work (if an op was applied to the A->x values). This is
// only done for matrices, not vectors, so vdim > 1 will always hold. The
// indices are valid so Step 1 is skipped. The tuples are not sorted, so Step
// 2 takes O((e log e)/p) time to do the sort. There are no duplicates, so
// Step 3 only does O(e/p) reads of J_work to count the vectors in each slice.
// Step 4 only does O(e/p) reads of J_work to compute T->h and T->p. Step 5
// does O(e/p) read/writes per thread, but it uses the simpler case in
// GB_reduce_build_template since no duplicates can appear. It is unlikely
// that S_work can be transplanted into T->x, since the input will almost
// always be unsorted.
// For GB_concat_hyper: uses I_work, J_work, and S_work. No duplicates
// appear. Tuples are not sorted on input. I_work is transplanted into C->i.
// J_work and S_work are freed on output. S_work is not transplanted into
// C->x.
// For iso inputs/outputs: T and Sx have the same iso property. If
// they are iso, then dup is always NULL. Duplicates may or may not appear
// if T and Sx are iso.
// (1) GrB_Matrix_build, GrB_Vector_build, and GB_wait do not pass in an iso
// Sx array, where Sx is S_input for GrB*build, and S_work for GB_wait.
// Sx and Tx are not iso. Duplicates may appear. dup is always present
// for GrB*build, but may be either NULL or non-NULL for GB_wait.
// (2) GxB_Matrix_build_Scalar and GxB_Vector_build_Scalar: always construct
// iso matrices. For those methods Sx and Tx are always iso, and no dup
// operator is passed in (dup is NULL here, which is the implied 2nd
// operator). Duplicates may appear.
// (3) GB_transpose and GB_concat_hyper can pass in Sx as iso or
// non-iso, and always passes in dup as NULL since there are no
// duplicates. Sx and Tx are either both iso, or both non-iso.
// This method always returns T as hypersparse, and T is iso if and only
// if Sx is iso.
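// Illustrative sketch (not part of this file): the most common route into
// GB_builder is GrB_Matrix_build, where the user passes unsorted tuples and a
// dup operator.  For a double matrix A already created with GrB_Matrix_new,
// this looks roughly like:
//
//      GrB_Index I [4] = { 0, 2, 0, 3 } ;
//      GrB_Index J [4] = { 1, 2, 1, 0 } ;
//      double    X [4] = { 1., 2., 3., 4. } ;
//      // the duplicate entry at (0,1) is assembled by dup = GrB_PLUS_FP64,
//      // so the built matrix holds 1+3 = 4 at position (0,1)
//      GrB_Matrix_build_FP64 (A, I, J, X, 4, GrB_PLUS_FP64) ;
//
// GB_build then hands the tuples to this function as (I_input, J_input,
// S_input) with dup = GrB_PLUS_FP64.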
#include "GB_build.h"
#include "GB_sort.h"
#include "GB_binop.h"
#ifndef GBCOMPACT
#include "GB_red__include.h"
#endif
#define GB_I_WORK(t) (((t) < 0) ? -1 : I_work [t])
#define GB_J_WORK(t) (((t) < 0) ? -1 : ((J_work == NULL) ? 0 : J_work [t]))
#define GB_K_WORK(t) (((t) < 0) ? -1 : ((K_work == NULL) ? t : K_work [t]))
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_FREE (I_work_handle, *I_work_size_handle) ; \
GB_FREE (J_work_handle, *J_work_size_handle) ; \
GB_FREE (S_work_handle, *S_work_size_handle) ; \
GB_FREE_WERK (&K_work, K_work_size) ; \
}
//------------------------------------------------------------------------------
// GB_builder
//------------------------------------------------------------------------------
GrB_Info GB_builder // build a matrix from tuples
(
GrB_Matrix T, // matrix to build, static or dynamic header
const GrB_Type ttype, // type of output matrix T
const int64_t vlen, // length of each vector of T
const int64_t vdim, // number of vectors in T
const bool is_csc, // true if T is CSC, false if CSR
int64_t **I_work_handle, // for (i,k) or (j,i,k) tuples
size_t *I_work_size_handle,
int64_t **J_work_handle, // for (j,i,k) tuples
size_t *J_work_size_handle,
GB_void **S_work_handle, // array of values of tuples, size ijslen,
// or size 1 if S is iso
size_t *S_work_size_handle,
bool known_sorted, // true if tuples known to be sorted
bool known_no_duplicates, // true if tuples known to have no duplicates
int64_t ijslen, // size of I_work and J_work arrays
const bool is_matrix, // true if T a GrB_Matrix, false if vector
const int64_t *restrict I_input,// original indices, size nvals
const int64_t *restrict J_input,// original indices, size nvals
const GB_void *restrict S_input,// array of values of tuples, size nvals,
// or size 1 if S_input or S_work are iso
const bool S_iso, // true if S_input or S_work are iso
const int64_t nvals, // number of tuples, and size of K_work
const GrB_BinaryOp dup, // binary function to assemble duplicates,
// if NULL use the SECOND operator to
// keep the most recent duplicate.
const GrB_Type stype, // the type of S_work or S_input
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (T != NULL) ; // T is a static or dynamic header on input
ASSERT (nvals >= 0) ;
ASSERT_TYPE_OK (ttype, "ttype for builder", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (dup, "dup for builder", GB0) ;
ASSERT (I_work_handle != NULL) ;
ASSERT (J_work_handle != NULL) ;
ASSERT (S_work_handle != NULL) ;
ASSERT (!GB_OP_IS_POSITIONAL (dup)) ;
ASSERT (I_work_size_handle != NULL) ;
ASSERT (J_work_size_handle != NULL) ;
ASSERT (S_work_size_handle != NULL) ;
//--------------------------------------------------------------------------
// get Sx
//--------------------------------------------------------------------------
GB_void *restrict S_work = (*S_work_handle) ;
const GB_void *restrict Sx = (S_work == NULL) ? S_input : S_work ;
ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ;
ASSERT (GB_IMPLIES (S_iso, ttype == stype)) ;
ASSERT (GB_IMPLIES (S_iso, dup == NULL)) ;
//==========================================================================
// symbolic phase of the build =============================================
//==========================================================================
// The symbolic phase sorts the tuples and finds any duplicates. The
// output matrix T is constructed (not including T->i and T->x), and T->h
// and T->p are computed. Then I_work is transplanted into T->i, or T->i is
// allocated. T->x is then allocated. It is not computed until the
// numeric phase.
// When this function returns, I_work is either freed or transplanted into
// T->i. J_work is freed, and the I_work and J_work pointers (in the
// caller) are set to NULL by setting their handles to NULL. Note that
// J_work may already be NULL on input, if T has one or zero vectors
// (J_work_handle is always non-NULL however).
GrB_Info info ;
int64_t *restrict I_work = (*I_work_handle) ;
int64_t *restrict J_work = (*J_work_handle) ;
int64_t *restrict K_work = NULL ; size_t K_work_size = 0 ;
ASSERT (*J_work_size_handle == GB_Global_memtable_size (J_work)) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (nvals, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Work, int64_t) ;
GB_WERK_PUSH (Work, 5*(nthreads+1), int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
memset (Work, 0, Work_nitems * sizeof (int64_t)) ;
int64_t *restrict tstart_slice = Work ; // nthreads+1
int64_t *restrict tnvec_slice = Work + (nthreads+1) ; // nthreads+1
int64_t *restrict tnz_slice = Work + 2*(nthreads+1) ; // nthreads+1
int64_t *restrict kbad = Work + 3*(nthreads+1) ; // nthreads
int64_t *restrict ilast_slice = Work + 4*(nthreads+1) ; // nthreads
//--------------------------------------------------------------------------
// partition the tuples for the threads
//--------------------------------------------------------------------------
// Thread tid handles tuples tstart_slice [tid] to tstart_slice [tid+1]-1.
// Each thread handles about the same number of tuples. This partition
// depends only on nvals.
GB_eslice (tstart_slice, nvals, nthreads) ;
// tstart_slice [tid]: first tuple in slice tid
// tnvec_slice [tid]: # of vectors that start in a slice. If a vector
// starts in one slice and ends in another, it is
// counted as being in the first slice.
// tnz_slice [tid]: # of entries in a slice after removing duplicates
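// Illustrative sketch (assumed values, not computed here): with nvals = 10
// and nthreads = 3, GB_eslice yields a roughly even split such as
// tstart_slice = {0, 3, 6, 10}, so thread 0 owns tuples 0..2, thread 1 owns
// 3..5, and thread 2 owns 6..9.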
// sentinel values for the final cumulative sum
tnvec_slice [nthreads] = 0 ;
tnz_slice [nthreads] = 0 ;
// this becomes true if the first pass computes tnvec_slice and tnz_slice,
// and if the (I_input,J_input) tuples were found to be already sorted with
// no duplicates present.
bool tnvec_and_tnz_slice_computed = false ;
//--------------------------------------------------------------------------
// STEP 1: copy user input and check if valid
//--------------------------------------------------------------------------
// If the indices are provided by (I_input,J_input), then import them into
// (I_work,J_work) and check if they are valid, and sorted. If the input
// happens to be already sorted, then duplicates are detected and the # of
// vectors in each slice is counted.
if (I_work == NULL)
{
//----------------------------------------------------------------------
// allocate I_work
//----------------------------------------------------------------------
// allocate workspace to load and sort the index tuples:
// vdim <= 1: I_work and K_work for (i,k) tuples, where i = I_input [k]
// vdim > 1: also J_work for (j,i,k) tuples where i = I_input [k] and
// j = J_input [k]. If the tuples are found to be already sorted on
// input, then J_work is not allocated, and J_input is used instead.
// The k value in the tuple gives the position in the original set of
// tuples: I_input [k] and Sx [k] when vdim <= 1, and also J_input [k]
// for matrices with vdim > 1.
// The workspace I_work and J_work are allocated here but freed (or
// transplanted) inside GB_builder. K_work is allocated, used, and
// freed in GB_builder.
ASSERT (J_work == NULL) ;
I_work = GB_MALLOC (nvals, int64_t, I_work_size_handle) ;
(*I_work_handle) = I_work ;
ijslen = nvals ;
if (I_work == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// create the tuples to sort, and check for any invalid indices
//----------------------------------------------------------------------
known_sorted = true ;
bool no_duplicates_found = true ;
if (nvals == 0)
{
// nothing to do
}
else if (is_matrix)
{
//------------------------------------------------------------------
// C is a matrix; check both I_input and J_input
//------------------------------------------------------------------
ASSERT (J_input != NULL) ;
ASSERT (I_work != NULL) ;
ASSERT (vdim >= 0) ;
ASSERT (I_input != NULL) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t my_tnvec = 0 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
int64_t jlast = (kstart == 0) ? -1 : J_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i,j)
int64_t i = I_input [k] ;
int64_t j = J_input [k] ;
if (i < 0 || i >= vlen || j < 0 || j >= vdim)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted &&
((jlast < j) || (jlast == j && ilast <= i)) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(jlast == j && ilast == i)) ;
// copy the tuple into I_work. J_work is done later.
I_work [k] = i ;
if (j > jlast)
{
// vector j starts in this slice (but this is
// valid only if J_input is sorted on input)
my_tnvec++ ;
}
// log the last index seen
ilast = i ; jlast = j ;
}
// these are valid only if I_input and J_input are sorted on
// input, with no duplicates present.
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = kend - kstart ;
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
int64_t j = J_input [kbad [tid]] ;
int64_t row = is_csc ? i : j ;
int64_t col = is_csc ? j : i ;
int64_t nrows = is_csc ? vlen : vdim ;
int64_t ncols = is_csc ? vdim : vlen ;
GB_FREE_WORK ;
GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS,
"index (" GBd "," GBd ") out of bounds,"
" must be < (" GBd ", " GBd ")",
row, col, nrows, ncols) ;
}
}
// if the tuples were found to be already in sorted order, and if
// no duplicates were found, then tnvec_slice and tnz_slice are now
// valid. Otherwise, they can only be computed after sorting.
tnvec_and_tnz_slice_computed = known_sorted && no_duplicates_found ;
//------------------------------------------------------------------
// allocate J_work, if needed
//------------------------------------------------------------------
if (vdim > 1 && !known_sorted)
{
// copy J_input into J_work, so the tuples can be sorted
J_work = GB_MALLOC (nvals, int64_t, J_work_size_handle) ;
(*J_work_handle) = J_work ;
if (J_work == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_memcpy (J_work, J_input, nvals * sizeof (int64_t), nthreads);
}
else
{
// J_work is a shallow copy of J_input. The pointer is not
// copied into (*J_work_handle), so it will not be freed.
// J_input is not modified, even though it is typecast to the
// int64_t *J_work, since J_work is not modified in this case.
J_work = (int64_t *) J_input ;
}
}
else
{
//------------------------------------------------------------------
// C is a typecasted GrB_Vector; check only I_input
//------------------------------------------------------------------
ASSERT (I_input != NULL) ;
ASSERT (J_input == NULL) ;
ASSERT (vdim == 1) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i)
int64_t i = I_input [k] ;
if (i < 0 || i >= vlen)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted && (ilast <= i) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(ilast == i)) ;
// copy the tuple into the work arrays to be sorted
I_work [k] = i ;
// log the last index seen
ilast = i ;
}
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
GB_FREE_WORK ;
GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS,
"index (" GBd ") out of bounds, must be < (" GBd ")",
i, vlen) ;
}
}
}
//----------------------------------------------------------------------
// determine if duplicates are possible
//----------------------------------------------------------------------
// The input is now known to be sorted, or not. If it is sorted, and
// if no duplicates were found, then it is known to have no duplicates.
// Otherwise, duplicates might appear, but a sort is required first to
// check for duplicates.
known_no_duplicates = known_sorted && no_duplicates_found ;
}
//--------------------------------------------------------------------------
// STEP 2: sort the tuples in ascending order
//--------------------------------------------------------------------------
// If the tuples are known to already be sorted, Step 2 is skipped. In
// that case, K_work is NULL (not allocated), which implicitly means that
// K_work [k] = k for all k = 0:nvals-1. K_work is always NULL if Sx and
// Tx are iso.
if (!known_sorted)
{
//----------------------------------------------------------------------
// allocate K_work workspace (not needed if T and Sx are iso)
//----------------------------------------------------------------------
if (!S_iso)
{
// create the k part of each tuple
K_work = GB_MALLOC_WERK (nvals, int64_t, &K_work_size) ;
if (K_work == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
// The k part of each tuple (i,k) or (j,i,k) records the original
// position of the tuple in the input list. This allows an
// unstable sorting algorithm to be used. Since k is unique, it
// forces the result of the sort to be stable regardless of whether
// or not the sorting algorithm is stable. It also keeps track of
// where the numerical value of the tuple can be found; it is in
// Sx[k] for the tuple (i,k) or (j,i,k), regardless of where the
// tuple appears in the list after it is sorted.
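// Illustrative sketch (assumed tuples, not from this code): given unsorted
// tuples (j,i) = (1,5), (0,2), (1,5) with K_work = {0,1,2}, sorting on
// (j,i,k) yields (0,2,k=1), (1,5,k=0), (1,5,k=2). The two duplicate (1,5)
// tuples keep their original input order because the tie is broken by k,
// so dup is later applied to their values in input order.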
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvals ; k++)
{
K_work [k] = k ;
}
}
//----------------------------------------------------------------------
// sort all the tuples
//----------------------------------------------------------------------
if (vdim > 1)
{
//------------------------------------------------------------------
// sort a set of (j,i,k) tuples
//------------------------------------------------------------------
if (S_iso)
{
// K_work is NULL; only sort (j,i)
info = GB_msort_2 (J_work, I_work, nvals, nthreads) ;
}
else
{
info = GB_msort_3 (J_work, I_work, K_work, nvals, nthreads) ;
}
#ifdef GB_DEBUG
if (info == GrB_SUCCESS)
{
int64_t ilast = -1 ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < nvals ; k++)
{
int64_t i = I_work [k] ;
int64_t j = J_work [k] ;
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
ilast = i ;
jlast = j ;
}
}
#endif
}
else
{
//------------------------------------------------------------------
// sort a set of (i,k) tuples
//------------------------------------------------------------------
if (S_iso)
{
// K_work is NULL; only sort (i)
info = GB_msort_1 (I_work, nvals, nthreads) ;
}
else
{
info = GB_msort_2 (I_work, K_work, nvals, nthreads) ;
}
#ifdef GB_DEBUG
if (info == GrB_SUCCESS)
{
int64_t ilast = -1 ;
for (int64_t k = 0 ; k < nvals ; k++)
{
int64_t i = I_work [k] ;
ASSERT (ilast <= i) ;
ilast = i ;
}
}
#endif
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_msort_*
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// STEP 3: count vectors and duplicates in each slice
//--------------------------------------------------------------------------
// Duplicates are located, counted and their indices negated. The # of
// vectors in each slice is counted. If the indices are known to not have
// duplicates, then only the vectors are counted. Counting the # of
// vectors is skipped if already done by Step 1.
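// Illustrative sketch (assumed tuples, not from this code): for a vector
// build with sorted I_work = {2, 5, 5, 7}, the second 5 is a duplicate, so
// the scan below flags it as I_work = {2, 5, -1, 7}; that slice then reports
// my_ndupl = 1 and tnz_slice counts 3 surviving entries.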
if (known_no_duplicates)
{
//----------------------------------------------------------------------
// no duplicates: just count # vectors in each slice
//----------------------------------------------------------------------
// This is much faster, particularly if the # of vectors in each slice
// has already been computed.
#ifdef GB_DEBUG
{
// assert that there are no duplicates
int64_t ilast = -1, jlast = -1 ;
for (int64_t t = 0 ; t < nvals ; t++)
{
int64_t i = GB_I_WORK (t), j = GB_J_WORK (t) ;
bool is_duplicate = (i == ilast && j == jlast) ;
ASSERT (!is_duplicate) ;
ilast = i ; jlast = j ;
}
}
#endif
if (vdim <= 1)
{
// all tuples appear in at most one vector, and there are no
// duplicates, so there is no need to scan I_work or J_work.
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
tnvec_slice [tid] = 0 ;
tnz_slice [tid] = tend - tstart ;
}
tnvec_slice [0] = (nvals == 0) ? 0 : 1 ;
}
else
{
// count the # of unique vector indices in J_work. No need to scan
// I_work since there are no duplicates to be found. Also no need
// to compute them if already found in Step 1.
if (!tnvec_and_tnz_slice_computed)
{
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = J_work [t] ;
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = tend - tstart ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// look for duplicates and count # vectors in each slice
//----------------------------------------------------------------------
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
ilast_slice [tid] = GB_I_WORK (tstart-1) ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t my_ndupl = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t ilast = ilast_slice [tid] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
// tuples are now sorted but there may be duplicates
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
// check if (j,i,k) is a duplicate
if (i == ilast && j == jlast)
{
// flag the tuple as a duplicate
I_work [t] = -1 ;
my_ndupl++ ;
// the sort places earlier duplicate tuples (with smaller
// k) after later ones (with larger k).
ASSERT (GB_K_WORK (t-1) < GB_K_WORK (t)) ;
}
else
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
ilast = i ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = (tend - tstart) - my_ndupl ;
}
}
//--------------------------------------------------------------------------
// find total # of vectors and duplicates in all tuples
//--------------------------------------------------------------------------
// Replace tnvec_slice with its cumulative sum, after which each slice tid
// will be responsible for the # vectors in T that range from tnvec_slice
// [tid] to tnvec_slice [tid+1]-1.
GB_cumsum (tnvec_slice, nthreads, NULL, 1, NULL) ;
int64_t tnvec = tnvec_slice [nthreads] ;
// Replace tnz_slice with its cumulative sum
GB_cumsum (tnz_slice, nthreads, NULL, 1, NULL) ;
// find the total # of final entries, after assembling duplicates
int64_t tnz = tnz_slice [nthreads] ;
int64_t ndupl = nvals - tnz ;
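// Illustrative sketch (assumed counts, not computed here): if the three
// slices counted {2, 1, 3} new vectors, GB_cumsum turns tnvec_slice into
// {0, 2, 3, 6} and tnvec = 6; in Step 4, slice 0 then writes Th [0..1],
// slice 1 writes Th [2], and slice 2 writes Th [3..5]. tnz_slice is
// cumulated the same way to give each slice its starting position in T->i.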
//--------------------------------------------------------------------------
// allocate T; always hypersparse
//--------------------------------------------------------------------------
// allocate T; allocate T->p and T->h but do not initialize them.
// T is always hypersparse. The header T always exists on input, as
// either a static or dynamic header.
bool static_header = T->static_header ;
info = GB_new (&T, static_header, // always hyper, static or dynamic header
ttype, vlen, vdim, GB_Ap_malloc, is_csc,
GxB_HYPERSPARSE, GB_ALWAYS_HYPER, tnvec, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_WORK ;
return (info) ;
}
ASSERT (T->p != NULL) ;
ASSERT (T->h != NULL) ;
ASSERT (T->b == NULL) ;
ASSERT (T->i == NULL) ;
ASSERT (T->x == NULL) ;
T->iso = S_iso ; // OK: T is iso if and only if Sx is iso
bool do_burble = (vlen > 1 || vdim > 1) && (nvals > 1) ;
if (do_burble)
{
if (S_iso)
{
GBURBLE ("(iso build) ") ;
}
else
{
GBURBLE ("(build) ") ;
}
}
//--------------------------------------------------------------------------
// STEP 4: construct the vector pointers and hyperlist for T
//--------------------------------------------------------------------------
// Step 4 scans the J_work indices and constructs T->h and T->p.
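// Illustrative sketch (assumed tuples, not from this code): with sorted,
// duplicate-free J_work = {0, 0, 3, 3, 3, 7}, the no-duplicate branch below
// produces Th = {0, 3, 7} and Tp = {0, 2, 5}, and Tp [tnvec] = 6 is set
// after the loops, so vector 3 owns the entries in positions 2..4 of T->i.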
int64_t *restrict Th = T->h ;
int64_t *restrict Tp = T->p ;
if (vdim <= 1)
{
//----------------------------------------------------------------------
// special case for vectors
//----------------------------------------------------------------------
ASSERT (tnvec == 0 || tnvec == 1) ;
if (tnvec > 0)
{
Th [0] = 0 ;
Tp [0] = 0 ;
}
}
else if (ndupl == 0)
{
//----------------------------------------------------------------------
// no duplicates appear
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = GB_J_WORK (t) ;
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = t ;
my_tnvec++ ;
jlast = j ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// it is known that at least one duplicate appears
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnz = tnz_slice [tid] ;
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
if (i >= 0)
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = my_tnz ;
my_tnvec++ ;
jlast = j ;
}
my_tnz++ ;
}
}
}
}
// log the end of the last vector
T->nvec_nonempty = tnvec ;
T->nvec = tnvec ;
Tp [tnvec] = tnz ;
ASSERT (T->nvec == T->plen) ;
T->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// free J_work if it exists
//--------------------------------------------------------------------------
ASSERT (J_work_handle != NULL) ;
GB_FREE (J_work_handle, *J_work_size_handle) ;
J_work = NULL ;
//--------------------------------------------------------------------------
// allocate T->i
//--------------------------------------------------------------------------
if (ndupl == 0)
{
// shrink I_work from size ijslen to size tnz
if (tnz < ijslen)
{
// this cannot fail since the size is shrinking.
bool ok ;
GB_REALLOC (I_work, tnz, int64_t, I_work_size_handle, &ok, Context);
ASSERT (ok) ;
}
// transplant I_work into T->i
T->i = I_work ; T->i_size = (*I_work_size_handle) ;
I_work = NULL ;
(*I_work_handle) = NULL ;
(*I_work_size_handle) = 0 ;
}
else
{
// duplicates exist, so allocate a new T->i. I_work must be freed later
T->i = GB_MALLOC (tnz, int64_t, &(T->i_size)) ;
if (T->i == NULL)
{
// out of memory
GB_phbix_free (T) ;
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
}
int64_t *restrict Ti = T->i ;
//==========================================================================
// numerical phase of the build: assemble any duplicates
//==========================================================================
// The tuples have been sorted. Assemble any duplicates with a switch
// factory of built-in workers, or four generic workers. The vector
// pointers T->p and hyperlist T->h (if hypersparse) have already been
// computed.
// If there are no duplicates, T->i holds the row indices of the tuple.
// Otherwise, the row indices are still in I_work. K_work holds the
// positions of each tuple in the array Sx. The tuples are sorted so that
// duplicates are adjacent to each other and they appear in the order they
// appeared in the original tuples. This method assembles the duplicates
// and computes T->i and T->x from I_work, K_work, and Sx. If no duplicates
// appear, T->i is already computed, and Sx just
// needs to be copied and permuted into T->x.
// The (i,k,Sx[k]) tuples are held in two integer arrays: (1) I_work or
// T->i, and (2) K_work, and an array Sx of numerical values. Sx has not
// been sorted, nor even accessed yet. It is identical to the original
// unsorted tuples. The (i,k,Sx[k]) tuple holds the row index i, the
// position k, and the value Sx [k]. This entry becomes T(i,j) = Sx [k] in
// the matrix T, and duplicates (if any) are assembled via the dup
// operator.
//--------------------------------------------------------------------------
// get opcodes and check types
//--------------------------------------------------------------------------
// With GB_build, there can be 1 to 2 different types.
// T->type is identical to the types of x,y,z for z=dup(x,y).
// dup is never NULL and all its three types are the same
// The type of Sx (stype) can be different but must be compatible
// with T->type
// With GB_wait, there can be 1 to 5 different types:
// The pending tuples are in Sx, of type stype which must be
// compatible with dup->ytype and T->type
// z = dup (x,y): can be NULL or have 1 to 3 different types
// T->type: must be compatible with all above types.
// dup may be NULL, in which case it is assumed to be the implicit SECOND
// operator, with all three types equal to T->type
GrB_Type xtype, ytype, ztype ;
GxB_binary_function fdup ;
#ifndef GBCOMPACT
GB_Opcode opcode ;
#endif
GB_Type_code tcode = ttype->code ;
const size_t tsize = ttype->size ;
bool op_2nd ;
ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ;
if (dup == NULL)
{
//----------------------------------------------------------------------
// dup is the implicit SECOND operator
//----------------------------------------------------------------------
// z = SECOND (x,y) where all three types are the same as ttype
// T(i,j) = (ttype) Sx(k) will be done for all tuples.
#ifndef GBCOMPACT
opcode = GB_SECOND_opcode ;
#endif
xtype = ttype ;
ytype = ttype ;
ztype = ttype ;
fdup = NULL ;
op_2nd = true ;
ASSERT (GB_op_is_second (dup, ttype)) ;
}
else
{
//----------------------------------------------------------------------
// dup is an explicit operator
//----------------------------------------------------------------------
// T(i,j) = (ttype) Sx[k] will be done for the first tuple.
// for subsequent tuples: T(i,j) += Sx[k], via the dup operator and
// typecasting:
//
// y = (dup->ytype) Sx[k]
// x = (dup->xtype) T(i,j)
// z = (dup->ztype) dup (x,y)
// T(i,j) = (ttype) z
ASSERT_BINARYOP_OK (dup, "dup for build_factory", GB0) ;
ASSERT (!S_iso) ;
#ifndef GBCOMPACT
opcode = dup->opcode ;
#endif
xtype = dup->xtype ;
ytype = dup->ytype ;
ztype = dup->ztype ;
fdup = dup->function ;
op_2nd = GB_op_is_second (dup, ttype) ;
}
//--------------------------------------------------------------------------
// get the sizes and codes of each type
//--------------------------------------------------------------------------
GB_Type_code zcode = ztype->code ;
GB_Type_code xcode = xtype->code ;
GB_Type_code ycode = ytype->code ;
ASSERT (GB_Type_compatible (ttype, stype)) ; // T(i,j) = (ttype) Sx
ASSERT (GB_Type_compatible (ytype, stype)) ; // y = (ytype) Sx
ASSERT (GB_Type_compatible (xtype, ttype)) ; // x = (xtype) T(i,j)
ASSERT (GB_Type_compatible (ttype, ztype)) ; // T(i,j) = (ttype) z
size_t zsize = ztype->size ;
size_t xsize = xtype->size ;
size_t ysize = ytype->size ;
// no typecasting if all 5 types are the same
bool nocasting = (ttype == stype) &&
(ttype == xtype) && (ttype == ytype) && (ttype == ztype) ;
ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ;
ASSERT_TYPE_OK (stype, "stype for build_factory", GB0) ;
ASSERT_TYPE_OK (xtype, "xtype for build_factory", GB0) ;
ASSERT_TYPE_OK (ytype, "ytype for build_factory", GB0) ;
ASSERT_TYPE_OK (ztype, "ztype for build_factory", GB0) ;
//--------------------------------------------------------------------------
// STEP 5: assemble the tuples
//--------------------------------------------------------------------------
bool copy_S_into_T = (nocasting && known_sorted && ndupl == 0) ;
if (copy_S_into_T && S_work != NULL)
{
//----------------------------------------------------------------------
// transplant S_work into T->x
//----------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to copy Sx
// into Tx. Sx can be directly transplanted into T->x since Sx is
// provided as S_work. GB_builder must either transplant or free
// S_work. The transplant can be used by GB_wait, whenever the tuples
// are already sorted, with no duplicates, and no typecasting is
// needed, since S_work is always A->Pending->x. T and Sx may be iso
// or non-iso.
T->x = S_work ; T->x_size = (*S_work_size_handle) ;
S_work = NULL ;
(*S_work_handle) = NULL ;
(*S_work_size_handle) = 0 ;
int64_t tx_size_required = tnz * tsize ;
if (2 * tx_size_required < T->x_size)
{
// shrink the size of T->x
bool ok = true ;
GB_REALLOC (T->x, tx_size_required, GB_void, &(T->x_size), &ok,
Context) ;
}
}
else
{
//----------------------------------------------------------------------
// allocate T->x
//----------------------------------------------------------------------
T->x = GB_XALLOC (S_iso, tnz, tsize, &(T->x_size)) ;
if (T->x == NULL)
{
// out of memory
GB_phbix_free (T) ;
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_void *restrict Tx = (GB_void *) T->x ;
ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ;
if (nvals == 0)
{
// nothing to do
}
else if (copy_S_into_T)
{
//------------------------------------------------------------------
// copy Sx into T->x
//------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to
// copy Sx into Tx. Sx cannot be transplanted into T->x since
// S_work is NULL and S_input cannot be modified by GB_builder.
ASSERT (S_work == NULL) ;
ASSERT (Sx == S_input) ;
GB_memcpy (Tx, Sx, (S_iso ? 1 : nvals) * tsize, nthreads) ;
}
else if (nocasting)
{
//------------------------------------------------------------------
// assemble the values, Sx, into T, no typecasting needed
//------------------------------------------------------------------
// Sx (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled.
// There are 44 common cases of this function for built-in types
// and 8 associative operators: MIN, MAX, PLUS, TIMES for 10 types
// (all but boolean), and OR, AND, XOR, and EQ for boolean.
// In addition, the FIRST and SECOND operators are hard-coded, for
// another 22 workers, since SECOND is used by GB_wait and since
// FIRST is useful for keeping the first tuple seen. It is
// controlled by the GB_INCLUDE_SECOND_OPERATOR definition, so they
// do not appear in GB_reduce_to_* where the FIRST and SECOND
// operators are not needed.
// Early exit cannot be exploited, so the terminal is ignored.
bool done = false ;
if (S_iso)
{
//--------------------------------------------------------------
// T and Sx are iso; set iso value and delete duplicates
//--------------------------------------------------------------
memcpy (Tx, Sx, tsize) ;
#define GB_ISO_BUILD
#include "GB_reduce_build_template.c"
done = true ;
}
else
{
//--------------------------------------------------------------
// T and Sx are not iso; call in the workers
//--------------------------------------------------------------
#ifndef GBCOMPACT
//----------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------
#define GB_INCLUDE_SECOND_OPERATOR
#define GB_red(opname,aname) \
GB (_red_build_ ## opname ## aname)
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) ((atype *) Tx, Ti, \
(atype *) Sx, nvals, ndupl, I_work, K_work, \
tstart_slice, tnz_slice, nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//----------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------
// controlled by opcode and typecode
GB_Type_code typecode = tcode ;
#include "GB_red_factory.c"
#endif
}
//------------------------------------------------------------------
// generic worker
//------------------------------------------------------------------
if (!done)
{
if (do_burble) GBURBLE ("(generic build) ") ;
//--------------------------------------------------------------
// no typecasting, but use the fdup function pointer and memcpy
//--------------------------------------------------------------
// Either the fdup operator or type of Sx and T are
// user-defined, or fdup is not an associative operator handled
// by the GB_red_factory, or some combination of these
// conditions. User-defined types cannot be typecasted, so
// this handles all user-defined types.
// Tx [p] = (ttype) Sx [k], but with no typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
memcpy (Tx +((p)*tsize), Sx +((k)*tsize), tsize) ;
if (op_2nd)
{
//----------------------------------------------------------
// dup is the SECOND operator, with no typecasting
//----------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but 2nd op and no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k)
#include "GB_reduce_build_template.c"
}
else
{
//----------------------------------------------------------
// dup is another operator, with no typecasting needed
//----------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but with no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
fdup (Tx +((p)*tsize), Tx +((p)*tsize), Sx+((k)*tsize));
#include "GB_reduce_build_template.c"
}
}
}
else
{
//------------------------------------------------------------------
// assemble the values Sx into T, typecasting as needed
//------------------------------------------------------------------
if (do_burble)
{
GBURBLE ("(generic build with typecast) ") ;
}
// If T and Sx are iso, no typecasting is ever done, so this method
// is not used in that case.
ASSERT (!S_iso) ;
// Sx (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled. Not all of the 5 types are
// the same, but all of them are built-in since user-defined types
// cannot be typecasted.
const GB_Type_code scode = stype->code ;
const size_t ssize = stype->size ;
GB_cast_function cast_S_to_T = GB_cast_factory (tcode, scode) ;
GB_cast_function cast_S_to_Y = GB_cast_factory (ycode, scode) ;
GB_cast_function cast_T_to_X = GB_cast_factory (xcode, tcode) ;
GB_cast_function cast_Z_to_T = GB_cast_factory (tcode, zcode) ;
ASSERT (scode <= GB_FC64_code) ;
ASSERT (tcode <= GB_FC64_code) ;
ASSERT (xcode <= GB_FC64_code) ;
ASSERT (ycode <= GB_FC64_code) ;
ASSERT (zcode <= GB_FC64_code) ;
// Tx [p] = (ttype) Sx [k], with typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
cast_S_to_T (Tx +((p)*tsize), Sx +((k)*ssize), ssize) ;
if (op_2nd)
{
//--------------------------------------------------------------
// dup operator is the SECOND operator, with typecasting
//--------------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but 2nd op, with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k)
#include "GB_reduce_build_template.c"
}
else
{
//--------------------------------------------------------------
// dup is another operator, with typecasting required
//--------------------------------------------------------------
// Tx [p] += Sx [k], with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
{ \
/* ywork = (ytype) Sx [k] */ \
GB_void ywork [GB_VLA(ysize)] ; \
cast_S_to_Y (ywork, Sx +((k)*ssize), ssize) ; \
/* xwork = (xtype) Tx [p] */ \
GB_void xwork [GB_VLA(xsize)] ; \
cast_T_to_X (xwork, Tx +((p)*tsize), tsize) ; \
/* zwork = f (xwork, ywork) */ \
GB_void zwork [GB_VLA(zsize)] ; \
fdup (zwork, xwork, ywork) ; \
/* Tx [tnz-1] = (ttype) zwork */ \
cast_Z_to_T (Tx +((p)*tsize), zwork, zsize) ; \
}
#include "GB_reduce_build_template.c"
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
T->jumbled = false ;
ASSERT_MATRIX_OK (T, "T built", GB0) ;
ASSERT (GB_IS_HYPERSPARSE (T)) ;
return (GrB_SUCCESS) ;
}
|
displacement_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement (for penalty contact)
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
///@}
///@name Life Cycle
///@{
/// Constructor.
/**
* @param DispRatioTolerance Relative tolerance for displacement error
* @param DispAbsTolerance Absolute tolerance for displacement error
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementContactCriteria(
const TDataType DispRatioTolerance,
const TDataType DispAbsTolerance,
const bool PrintingOutput = false
)
: BaseType()
{
// Set local flags
mOptions.Set(DisplacementContactCriteria::PRINTING_OUTPUT, PrintingOutput);
mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, false);
// The displacement solution
mDispRatioTolerance = DispRatioTolerance;
mDispAbsTolerance = DispAbsTolerance;
}
/**
* @brief Default constructor (parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
: BaseType()
{
// The default parameters
Parameters default_parameters = Parameters(R"(
{
"ensure_contact" : false,
"print_convergence_criterion" : false,
"displacement_relative_tolerance" : 1.0e-4,
"displacement_absolute_tolerance" : 1.0e-9
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
// The displacement solution
mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();
// Set local flags
mOptions.Set(DisplacementContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, false);
}
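// Example usage (a sketch; SparseSpaceType and LocalSpaceType are
// placeholder template arguments assumed by this comment, not defined here):
//
//   Parameters settings(R"({
//       "displacement_relative_tolerance" : 1.0e-5,
//       "displacement_absolute_tolerance" : 1.0e-10
//   })");
//   auto p_criteria = Kratos::make_shared<
//       DisplacementContactCriteria<SparseSpaceType, LocalSpaceType>>(settings);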
// Copy constructor.
DisplacementContactCriteria( DisplacementContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
{
}
/// Destructor.
~DisplacementContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
// Initialize
TDataType disp_solution_norm = 0.0, disp_increase_norm = 0.0;
IndexType disp_dof_num(0);
// First iterator
const auto it_dof_begin = rDofSet.begin();
// Auxiliary values
std::size_t dof_id = 0;
TDataType dof_value = 0.0, dof_incr = 0.0;
// Loop over Dofs
#pragma omp parallel for firstprivate(dof_id,dof_value,dof_incr) reduction(+:disp_solution_norm,disp_increase_norm,disp_dof_num)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
if (it_dof->IsFree()) {
dof_id = it_dof->EquationId();
dof_value = it_dof->GetSolutionStepValue(0);
dof_incr = rDx[dof_id];
const auto curr_var = it_dof->GetVariable();
disp_solution_norm += dof_value * dof_value;
disp_increase_norm += dof_incr * dof_incr;
disp_dof_num++;
}
}
if(disp_increase_norm == 0.0) disp_increase_norm = 1.0;
if(disp_solution_norm == 0.0) disp_solution_norm = 1.0;
const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num);
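// Illustrative numbers (not computed by this code): with
// disp_increase_norm = 1.0e-12, disp_solution_norm = 1.0 and 100 free DoFs,
// disp_ratio = 1.0e-6 and disp_abs = 1.0e-8, so the default tolerances
// (1.0e-4 relative, 1.0e-9 absolute) are satisfied via the relative check.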
// The process info of the model part
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// We print the results // TODO: Replace for the new log
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& Table = p_table->GetTable();
Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance;
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
KRATOS_INFO("DisplacementContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
}
}
}
// We check if converged
const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
if (disp_converged) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& table = p_table->GetTable();
if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT))
table << BOLDFONT(FGRN(" Achieved"));
else
table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementContactCriteria") << "\tDoF convergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& table = p_table->GetTable();
if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT))
table << BOLDFONT(FRED(" Not achieved"));
else
table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
}
}
return false;
}
}
else // In this case all the displacements are imposed!
return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize( ModelPart& rModelPart ) override
{
BaseType::mConvergenceCriteriaIsInitialized = true;
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementContactCriteria::TABLE_IS_INITIALIZED)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& table = p_table->GetTable();
table.AddColumn("DP RATIO", 10);
table.AddColumn("EXP. RAT", 10);
table.AddColumn("ABS", 10);
table.AddColumn("EXP. ABS", 10);
table.AddColumn("CONVERGENCE", 15);
mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, true);
}
}
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementContactCriteria
///@name Local flags creation
///@{
/// Local Flags
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
}
#endif /* KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H */
|
GB_binop__isge_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint64)
// A*D function (colscale): GB (_AxD__isge_uint64)
// D*A function (rowscale): GB (_DxB__isge_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64)
// C=scalar+B GB (_bind1st__isge_uint64)
// C=scalar+B' GB (_bind1st_tran__isge_uint64)
// C=A+scalar GB (_bind2nd__isge_uint64)
// C=A'+scalar GB (_bind2nd_tran__isge_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
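// For example (added note): with aij = 7 and bij = 3, cij = (7 >= 3) = 1,
// stored as uint64_t; with aij = 2 and bij = 9, cij = 0. The ISGE operator
// returns 0/1 in the operand type, unlike GrB_GE_UINT64 which returns bool.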
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
QLA_D3_M_veq_M_times_pM.c | /**************** QLA_D3_M_veq_M_times_pM.c ********************/
#include <stdio.h>
#include <qla_config.h>
#include <qla_types.h>
#include <qla_random.h>
#include <qla_cmath.h>
#include <qla_d3.h>
#include <math.h>
static void start_slice(){
__asm__ __volatile__ ("");
}
static void end_slice(){
__asm__ __volatile__ ("");
}
void QLA_D3_M_veq_M_times_pM ( QLA_D3_ColorMatrix *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorMatrix *restrict *b, int n)
{
start_slice();
#ifdef HAVE_XLC
#pragma disjoint(*r,*a,**b)
__alignx(16,r);
__alignx(16,a);
#endif
#pragma omp parallel for
for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
__alignx(16,b[i]);
#endif
for(int i_c=0; i_c<3; i_c++) {
for(int j_c=0; j_c<3; j_c++) {
QLA_D_Complex x;
QLA_c_eq_r(x,0.);
for(int k_c=0; k_c<3; k_c++) {
QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_M(*b[i],k_c,j_c));
}
QLA_c_eq_c(QLA_D3_elem_M(r[i],i_c,j_c),x);
}
}
}
end_slice();
}
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include "omp.h"
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
int testpoint(complex_t c){
int iter;
complex_t z;
double temp;
z = c;
for(iter=0; iter<MXITER; iter++){
temp = (z.r*z.r) - (z.i*z.i) + c.r;
z.i = z.r*z.i*2. + c.i;
z.r = temp;
if((z.r*z.r+z.i*z.i)>4.0){
return iter;
}
}
return iter;
}
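/*
Quick sanity check (illustrative addition, not part of the assignment): for
c = 0+0i the iterate stays at zero, so |z|^2 never exceeds 4 and testpoint
returns MXITER; for c = 2+0i the very first update gives z = 6+0i, |z|^2 = 36,
so testpoint returns 0.
*/
#if 0
static void testpoint_examples(void){
complex_t inside = {0.0, 0.0}; /* stays bounded forever */
complex_t outside = {2.0, 0.0}; /* escapes on the first iteration */
printf("inside: %d iterations (expect %d)\n", testpoint(inside), MXITER);
printf("outside: %d iterations (expect 0)\n", testpoint(outside));
}
#endif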
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
int n;
double dr = (cmax.r-cmin.r)/(Nre-1);
double di = (cmax.i-cmin.i)/(Nim-1);
// Q2c: add a compiler directive to split the outer for loop amongst threads here
// (m and c are declared inside the loop so each thread works on private copies)
#pragma omp parallel for
for(n=0;n<Nim;++n){
for(int m=0;m<Nre;++m){
complex_t c;
c.r = cmin.r + dr*m;
c.i = cmin.i + di*n;
count[m+n*Nre] = testpoint(c);
}
}
}
int main(int argc, char **argv){
// to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
// usage: ./mandelbrot 4096 4096 1
int Nre = atoi(argv[1]);
int Nim = atoi(argv[2]);
int Nthreads = atoi(argv[3]);
// Q2b: set the number of OpenMP threads to be Nthreads here:
omp_set_num_threads(Nthreads);
// storage for the iteration counts
float *count = (float*) malloc(Nre*Nim*sizeof(float));
// Parameters for a bounding box for "c" that generates an interesting image
const float centRe = -.759856, centIm= .125547;
const float diam = 0.151579;
complex_t cmin;
complex_t cmax;
cmin.r = centRe - 0.5*diam;
cmax.r = centRe + 0.5*diam;
cmin.i = centIm - 0.5*diam;
cmax.i = centIm + 0.5*diam;
// Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time
double start = omp_get_wtime();
// compute mandelbrot set
mandelbrot(Nre, Nim, cmin, cmax, count);
// Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time
double end = omp_get_wtime();
// print elapsed time
printf("elapsed = %g\n", end-start);
// output mandelbrot to png format image
FILE *fp = fopen("mandelbrot.png", "w");
write_hot_png(fp, Nre, Nim, count, 0, 80);
exit(0);
return 0;
}
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
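/*
Illustrative call sketch (added for exposition; not part of MagickCore).
It assumes an Image *image and ExceptionInfo *exception already exist, e.g.
obtained elsewhere from the image-reading API, and simply passes the stored
EXIF orientation straight through.
*/
#if 0
static Image *ExampleAutoOrient(Image *image,ExceptionInfo *exception)
{
Image
*oriented;
oriented=AutoOrientImage(image,image->orientation,exception);
if (oriented == (Image *) NULL)
return((Image *) NULL); /* clone/rotate failed */
image=DestroyImage(image); /* caller now owns the re-oriented image */
return(oriented);
}
#endif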
MagickExport Image *AutoOrientImage(const Image *image,
const OrientationType orientation,ExceptionInfo *exception)
{
Image
*orient_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
orient_image=(Image *) NULL;
switch(orientation)
{
case UndefinedOrientation:
case TopLeftOrientation:
default:
{
orient_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case TopRightOrientation:
{
orient_image=FlopImage(image,exception);
break;
}
case BottomRightOrientation:
{
orient_image=RotateImage(image,180.0,exception);
break;
}
case BottomLeftOrientation:
{
orient_image=FlipImage(image,exception);
break;
}
case LeftTopOrientation:
{
orient_image=TransposeImage(image,exception);
break;
}
case RightTopOrientation:
{
orient_image=RotateImage(image,90.0,exception);
break;
}
case RightBottomOrientation:
{
orient_image=TransverseImage(image,exception);
break;
}
case LeftBottomOrientation:
{
orient_image=RotateImage(image,270.0,exception);
break;
}
}
if (orient_image != (Image *) NULL)
orient_image->orientation=TopLeftOrientation;
return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
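/*
Illustrative sketch (added for exposition; not part of MagickCore): chopping a
30x40 region at offset +10+10 out of a 100x100 image yields a 70x60 result,
because the removed rows and columns are collapsed rather than left blank.
"image" and "exception" are assumed to exist already.
*/
#if 0
static Image *ExampleChop(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
chop;
chop.width=30;
chop.height=40;
chop.x=10;
chop.y=10;
return(ChopImage(image,&chop,exception)); /* 100x100 -> 70x60 */
}
#endif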
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Extract chop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,chop_image,extent.y,1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(chop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(chop_image,channel,p[i],q);
}
q+=GetPixelChannels(chop_image);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ChopImage)
#endif
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
Extract chop image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(chop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(chop_image,channel,p[i],q);
}
q+=GetPixelChannels(chop_image);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ChopImage)
#endif
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
if (status == MagickFalse)
chop_image=DestroyImage(chop_image);
return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
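/*
Illustrative sketch (added for exposition; not part of MagickCore): given an
image list holding the separate C, M, Y and K planes in that order (for
example four grayscale separations read elsewhere), one CMYK image is produced
for every group of four planes. "planes" and "exception" are assumed to exist
already.
*/
#if 0
static Image *ExampleConsolidate(const Image *planes,ExceptionInfo *exception)
{
/* planes -> C,M,Y,K,C,M,Y,K,... ; the result is one CMYK image per group */
return(ConsolidateCMYKImages(planes,exception));
}
#endif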
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
register ssize_t
j;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cmyk_images=NewImageList();
for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
{
register ssize_t
i;
assert(images != (Image *) NULL);
cmyk_image=CloneImage(images,0,0,MagickTrue,
exception);
if (cmyk_image == (Image *) NULL)
break;
if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
for (i=0; i < 4; i++)
{
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
Quantum
pixel;
pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
switch (i)
{
case 0: SetPixelCyan(cmyk_image,pixel,q); break;
case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
case 2: SetPixelYellow(cmyk_image,pixel,q); break;
case 3: SetPixelBlack(cmyk_image,pixel,q); break;
default: break;
}
p+=GetPixelChannels(images);
q+=GetPixelChannels(cmyk_image);
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
AppendImageToList(&cmyk_images,cmyk_image);
}
return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
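/*
Illustrative sketch (added for exposition; not part of MagickCore): extract the
centre quarter of an image. The region must be fully specified; no
geometry-flag handling is performed here (use CropImageToTiles() for that).
"image" and "exception" are assumed to exist already.
*/
#if 0
static Image *ExampleCropCentre(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
region;
region.width=image->columns/2;
region.height=image->rows/2;
region.x=(ssize_t) (image->columns/4);
region.y=(ssize_t) (image->rows/4);
return(CropImage(image,&region,exception));
}
#endif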
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"
CacheView
*crop_view,
*image_view;
Image
*crop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
offset;
RectangleInfo
bounding_box,
page;
ssize_t
y;
/*
Check crop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
bounding_box=image->page;
if ((bounding_box.width == 0) || (bounding_box.height == 0))
{
bounding_box.width=image->columns;
bounding_box.height=image->rows;
}
page=(*geometry);
if (page.width == 0)
page.width=bounding_box.width;
if (page.height == 0)
page.height=bounding_box.height;
if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
((bounding_box.y-page.y) >= (ssize_t) page.height) ||
((page.x-bounding_box.x) > (ssize_t) image->columns) ||
((page.y-bounding_box.y) > (ssize_t) image->rows))
{
/*
Crop is not within virtual canvas, return 1 pixel transparent image.
*/
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
crop_image->alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(crop_image,exception);
crop_image->page=bounding_box;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
if (crop_image->dispose == BackgroundDispose)
crop_image->dispose=NoneDispose;
return(crop_image);
}
if ((page.x < 0) && (bounding_box.x >= 0))
{
page.width+=page.x-bounding_box.x;
page.x=0;
}
else
{
page.width-=bounding_box.x-page.x;
page.x-=bounding_box.x;
if (page.x < 0)
page.x=0;
}
if ((page.y < 0) && (bounding_box.y >= 0))
{
page.height+=page.y-bounding_box.y;
page.y=0;
}
else
{
page.height-=bounding_box.y-page.y;
page.y-=bounding_box.y;
if (page.y < 0)
page.y=0;
}
if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
page.width=image->columns-page.x;
if ((geometry->width != 0) && (page.width > geometry->width))
page.width=geometry->width;
if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
page.height=image->rows-page.y;
if ((geometry->height != 0) && (page.height > geometry->height))
page.height=geometry->height;
bounding_box.x+=page.x;
bounding_box.y+=page.y;
if ((page.width == 0) || (page.height == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return((Image *) NULL);
}
/*
Initialize crop image attributes.
*/
crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->page.width=image->page.width;
crop_image->page.height=image->page.height;
offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
if ((offset.x > (ssize_t) image->page.width) ||
(offset.y > (ssize_t) image->page.height))
{
crop_image->page.width=bounding_box.width;
crop_image->page.height=bounding_box.height;
}
crop_image->page.x=bounding_box.x;
crop_image->page.y=bounding_box.y;
/*
Crop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
for (y=0; y < (ssize_t) crop_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
1,exception);
q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) crop_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(crop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(crop_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(crop_image);
}
if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CropImage)
#endif
proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
crop_view=DestroyCacheView(crop_view);
image_view=DestroyCacheView(image_view);
crop_image->type=image->type;
if (status == MagickFalse)
crop_image=DestroyImage(crop_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% The format of the CropImageToTiles method is:
%
% Image *CropImageToTiles(const Image *image,
% const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to crop into tiles.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
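/*
Illustrative sketch (added for exposition; not part of MagickCore): two common
geometry strings. "128x128" cuts the image into fixed 128x128 tiles and
returns a list of images; "4x3@" cuts it into a 4-by-3 grid of roughly equal
tiles. "image" and "exception" are assumed to exist already.
*/
#if 0
static Image *ExampleTiles(const Image *image,ExceptionInfo *exception)
{
Image
*fixed_tiles,
*grid_tiles;
fixed_tiles=CropImageToTiles(image,"128x128",exception);
grid_tiles=CropImageToTiles(image,"4x3@",exception);
if (fixed_tiles != (Image *) NULL)
fixed_tiles=DestroyImageList(fixed_tiles);
return(grid_tiles);
}
#endif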
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport Image *CropImageToTiles(const Image *image,
const char *crop_geometry,ExceptionInfo *exception)
{
Image
*next,
*crop_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
crop_image=NewImageList();
next=NewImageList();
flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
if ((flags & AreaValue) != 0)
{
PointInfo
delta,
offset;
RectangleInfo
crop;
size_t
height,
width;
/*
Crop into NxM tiles (@ flag).
*/
width=image->columns;
height=image->rows;
if (geometry.width == 0)
geometry.width=1;
if (geometry.height == 0)
geometry.height=1;
if ((flags & AspectValue) == 0)
{
width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
else
{
width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
delta.x=(double) width/geometry.width;
delta.y=(double) height/geometry.height;
if (delta.x < 1.0)
delta.x=1.0;
if (delta.y < 1.0)
delta.y=1.0;
for (offset.y=0; offset.y < (double) height; )
{
if ((flags & AspectValue) == 0)
{
crop.y=(ssize_t) MagickRound((double) (offset.y-
(geometry.y > 0 ? 0 : geometry.y)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) MagickRound((double) (offset.y+
(geometry.y < 0 ? 0 : geometry.y)));
}
else
{
crop.y=(ssize_t) MagickRound((double) (offset.y-
(geometry.y > 0 ? geometry.y : 0)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) MagickRound((double)
(offset.y+(geometry.y < -1 ? geometry.y : 0)));
}
crop.height-=crop.y;
crop.y+=image->page.y;
for (offset.x=0; offset.x < (double) width; )
{
if ((flags & AspectValue) == 0)
{
crop.x=(ssize_t) MagickRound((double) (offset.x-
(geometry.x > 0 ? 0 : geometry.x)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) MagickRound((double) (offset.x+
(geometry.x < 0 ? 0 : geometry.x)));
}
else
{
crop.x=(ssize_t) MagickRound((double) (offset.x-
(geometry.x > 0 ? geometry.x : 0)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) MagickRound((double) (offset.x+
(geometry.x < 0 ? geometry.x : 0)));
}
crop.width-=crop.x;
crop.x+=image->page.x;
next=CropImage(image,&crop,exception);
if (next != (Image *) NULL)
AppendImageToList(&crop_image,next);
}
}
ClearMagickException(exception);
return(crop_image);
}
if (((geometry.width == 0) && (geometry.height == 0)) ||
((flags & XValue) != 0) || ((flags & YValue) != 0))
{
/*
Crop a single region at +X+Y.
*/
crop_image=CropImage(image,&geometry,exception);
if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
{
crop_image->page.width=geometry.width;
crop_image->page.height=geometry.height;
crop_image->page.x-=geometry.x;
crop_image->page.y-=geometry.y;
}
return(crop_image);
}
if ((image->columns > geometry.width) || (image->rows > geometry.height))
{
RectangleInfo
page;
size_t
height,
width;
ssize_t
x,
y;
/*
Crop into tiles of fixed size WxH.
*/
page=image->page;
if (page.width == 0)
page.width=image->columns;
if (page.height == 0)
page.height=image->rows;
width=geometry.width;
if (width == 0)
width=page.width;
height=geometry.height;
if (height == 0)
height=page.height;
next=NewImageList();
for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
{
for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
{
geometry.width=width;
geometry.height=height;
geometry.x=x;
geometry.y=y;
next=CropImage(image,&geometry,exception);
if (next == (Image *) NULL)
break;
AppendImageToList(&crop_image,next);
}
if (next == (Image *) NULL)
break;
}
return(crop_image);
}
return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to excerpt with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"
CacheView
*excerpt_view,
*image_view;
Image
*excerpt_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate excerpt image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (excerpt_image == (Image *) NULL)
return((Image *) NULL);
/*
Excerpt each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
for (y=0; y < (ssize_t) excerpt_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) excerpt_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
if ((traits == UndefinedPixelTrait) ||
(excerpt_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(excerpt_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(excerpt_image);
}
if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ExcerptImage)
#endif
proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
excerpt_view=DestroyCacheView(excerpt_view);
image_view=DestroyCacheView(image_view);
excerpt_image->type=image->type;
if (status == MagickFalse)
excerpt_image=DestroyImage(excerpt_image);
return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
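/*
Illustrative sketch (added for exposition; not part of MagickCore): pad an
image out to a 1024x768 canvas filled with the background color, placing the
original pixels 50 pixels in from the top-left corner. Note the geometry
offsets are negated internally when the image is composited, so a desired
placement of +50+50 is requested as x = -50, y = -50. "image" and "exception"
are assumed to exist already.
*/
#if 0
static Image *ExampleExtent(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
extent;
extent.width=1024;
extent.height=768;
extent.x=(-50); /* original lands 50 pixels in from the left edge */
extent.y=(-50); /* ... and 50 pixels down from the top edge */
return(ExtentImage(image,&extent,exception));
}
#endif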
MagickExport Image *ExtentImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
Image
*extent_image;
/*
Allocate extent image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((image->columns == geometry->width) &&
(image->rows == geometry->height) &&
(geometry->x == 0) && (geometry->y == 0))
return(CloneImage(image,0,0,MagickTrue,exception));
extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (extent_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageBackgroundColor(extent_image,exception);
(void) CompositeImage(extent_image,image,image->compose,MagickTrue,
-geometry->x,-geometry->y,exception);
return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
CacheView
*flip_view,
*image_view;
Image
*flip_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flip_image=CloneImage(image,0,0,MagickTrue,exception);
if (flip_image == (Image *) NULL)
return((Image *) NULL);
/*
Flip image.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
for (y=0; y < (ssize_t) flip_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
1),flip_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) flip_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
if ((traits == UndefinedPixelTrait) ||
(flip_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(flip_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(flip_image);
}
if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FlipImage)
#endif
proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flip_view=DestroyCacheView(flip_view);
image_view=DestroyCacheView(image_view);
flip_image->type=image->type;
if (page.height != 0)
page.y=(ssize_t) (page.height-flip_image->rows-page.y);
flip_image->page=page;
if (status == MagickFalse)
flip_image=DestroyImage(flip_image);
return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
CacheView
*flop_view,
*image_view;
Image
*flop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flop_image=CloneImage(image,0,0,MagickTrue,exception);
if (flop_image == (Image *) NULL)
return((Image *) NULL);
/*
Flop each row.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
for (y=0; y < (ssize_t) flop_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(flop_image)*flop_image->columns;
for (x=0; x < (ssize_t) flop_image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(flop_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(flop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(flop_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FlopImage)
#endif
proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flop_view=DestroyCacheView(flop_view);
image_view=DestroyCacheView(image_view);
flop_image->type=image->type;
if (page.width != 0)
page.x=(ssize_t) (page.width-flop_image->columns-page.x);
flop_image->page=page;
if (status == MagickFalse)
flop_image=DestroyImage(flop_image);
return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
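/*
Illustrative sketch (added for exposition; not part of MagickCore): roll an
image a quarter of its width to the right and a quarter of its height down;
pixels pushed off one edge reappear on the opposite edge. "image" and
"exception" are assumed to exist already.
*/
#if 0
static Image *ExampleRoll(const Image *image,ExceptionInfo *exception)
{
return(RollImage(image,(ssize_t) (image->columns/4),
(ssize_t) (image->rows/4),exception));
}
#endif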
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
ssize_t
y;
if (columns == 0)
return(MagickTrue);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source,exception);
destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,destination,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Transfer scanline.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
{
PixelChannel channel = GetPixelChannelChannel(source,i);
PixelTrait source_traits=GetPixelChannelTraits(source,channel);
PixelTrait destination_traits=GetPixelChannelTraits(destination,
channel);
if ((source_traits == UndefinedPixelTrait) ||
(destination_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(destination,channel,p[i],q);
}
p+=GetPixelChannels(source);
q+=GetPixelChannels(destination);
}
sync=SyncCacheViewAuthenticPixels(destination_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
Image
*roll_image;
MagickStatusType
status;
RectangleInfo
offset;
/*
Initialize roll image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
roll_image=CloneImage(image,0,0,MagickTrue,exception);
if (roll_image == (Image *) NULL)
return((Image *) NULL);
offset.x=x_offset;
offset.y=y_offset;
while (offset.x < 0)
offset.x+=(ssize_t) image->columns;
while (offset.x >= (ssize_t) image->columns)
offset.x-=(ssize_t) image->columns;
while (offset.y < 0)
offset.y+=(ssize_t) image->rows;
while (offset.y >= (ssize_t) image->rows)
offset.y-=(ssize_t) image->rows;
/*
Roll image.
*/
status=CopyImageRegion(roll_image,image,(size_t) offset.x,
(size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
offset.y,0,0,exception);
(void) SetImageProgress(image,RollImageTag,0,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
(size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
exception);
(void) SetImageProgress(image,RollImageTag,1,3);
status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,2,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
offset.y,0,0,offset.x,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,3,3);
roll_image->type=image->type;
if (status == MagickFalse)
roll_image=DestroyImage(roll_image);
return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
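/*
Illustrative sketch (added for exposition; not part of MagickCore): shave a
10-pixel border off every edge, so a WxH image becomes (W-20)x(H-20).
"image" and "exception" are assumed to exist already.
*/
#if 0
static Image *ExampleShave(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
border;
border.width=10; /* columns removed from the left and the right */
border.height=10; /* rows removed from the top and the bottom */
border.x=0;
border.y=0;
return(ShaveImage(image,&border,exception));
}
#endif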
MagickExport Image *ShaveImage(const Image *image,
const RectangleInfo *shave_info,ExceptionInfo *exception)
{
Image
*shave_image;
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (((2*shave_info->width) >= image->columns) ||
((2*shave_info->height) >= image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
SetGeometry(image,&geometry);
geometry.width-=2*shave_info->width;
geometry.height-=2*shave_info->height;
geometry.x=(ssize_t) shave_info->width+image->page.x;
geometry.y=(ssize_t) shave_info->height+image->page.y;
shave_image=CropImage(image,&geometry,exception);
if (shave_image == (Image *) NULL)
return((Image *) NULL);
shave_image->page.width-=2*shave_info->width;
shave_image->page.height-=2*shave_info->height;
shave_image->page.x-=(ssize_t) shave_info->width;
shave_image->page.y-=(ssize_t) shave_info->height;
return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
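/*
Illustrative sketch (added for exposition; not part of MagickCore): splice a
20-column by 30-row band of background color into an image at offset
(+40,+50); the result is 20 columns wider and 30 rows taller than the
original. "image" and "exception" are assumed to exist already.
*/
#if 0
static Image *ExampleSplice(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
band;
band.width=20;
band.height=30;
band.x=40;
band.y=50;
return(SpliceImage(image,&band,exception));
}
#endif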
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
columns,
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
{
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
(IsGrayColorspace(splice_image->colorspace) != MagickFalse))
(void) SetImageColorspace(splice_image,sRGBColorspace,exception);
if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
(splice_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
(void) SetImageBackgroundColor(splice_image,exception);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SpliceImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
if ((y < 0) || (y >= (ssize_t)splice_image->rows))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
splice_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SpliceImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will simply be 'lost', not destroyed.
%
% Also, if the crop generates a list of images, only the first image is
% resized. And finally, if the crop succeeds but the resize fails, you will
% get a cropped image as well as a 'false' or 'failed' report.
%
% This function should probably be deprecated in favor of direct calls to
% CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image. The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
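%  A minimal usage sketch (internal API; image and exception are assumed to
%  be valid, and image a single-image list):
%
%    (void) TransformImage(&image,"100x100+10+10","50x50",exception);
%
%  crops a 100x100 region at offset (10,10) and resizes the result to 50x50.
%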
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
Image
*resize_image,
*transform_image;
RectangleInfo
geometry;
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
transform_image=(*image);
if (crop_geometry != (const char *) NULL)
{
Image
*crop_image;
/*
Crop image to a user specified size.
*/
crop_image=CropImageToTiles(*image,crop_geometry,exception);
if (crop_image == (Image *) NULL)
transform_image=CloneImage(*image,0,0,MagickTrue,exception);
else
{
transform_image=DestroyImage(transform_image);
transform_image=GetFirstImageInList(crop_image);
}
*image=transform_image;
}
if (image_geometry == (const char *) NULL)
return(MagickTrue);
/*
Scale image to a user specified size.
*/
(void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
exception);
if ((transform_image->columns == geometry.width) &&
(transform_image->rows == geometry.height))
return(MagickTrue);
resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
transform_image->filter,exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
transform_image=DestroyImage(transform_image);
transform_image=resize_image;
*image=transform_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
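%  In effect, destination pixel (y,x) receives source pixel (x,y); the page
%  geometry is swapped to match.
%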
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transpose_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transpose_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(transpose_image);
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,TransposeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
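%  In effect, the pixels are reflected across the anti-diagonal: source pixel
%  (x,y) lands at destination column rows-1-y, row columns-1-x.
%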
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
0,1,transverse_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(transverse_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(transverse_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transverse_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transverse_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransverseImage)
#endif
proceed=SetImageProgress(image,TransverseImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
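%  The retained region is the bounding box computed by GetImageBoundingBox().
%  If the bounding box is empty (the image is uniform), a transparent 1x1
%  image with page offset (-1,-1) is returned instead.
%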
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
geometry=GetImageBoundingBox(image,exception);
if ((geometry.width == 0) || (geometry.height == 0))
{
Image
*crop_image;
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
crop_image->alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(crop_image,exception);
crop_image->page=image->page;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
return(crop_image);
}
geometry.x+=image->page.x;
geometry.y+=image->page.y;
return(CropImage(image,&geometry,exception));
}
|
GB_cumsum.c | //------------------------------------------------------------------------------
// GB_cumsum: cumulative sum of an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Compute the cumulative sum of an array count[0:n], of size n+1:
// k = sum (count [0:n-1] != 0) ;
// count = cumsum ([0 count[0:n-1]]) ;
// That is, count [j] on input is overwritten with sum (count [0..j-1]).
// On input, count [n] is not accessed and is implicitly zero.
// On output, count [n] is the total sum.
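// A worked example of these semantics (illustration only): for n = 4 and
// count = [3 0 2 5 .], the result is count = [0 3 3 5 10] and k = 3 (three
// of the first n entries are nonzero).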
#include "GB.h"
GB_PUBLIC
void GB_cumsum // cumulative sum of an array
(
int64_t *restrict count, // size n+1, input/output
const int64_t n,
int64_t *restrict kresult, // return k, if needed by the caller
int nthreads,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (count != NULL) ;
ASSERT (n >= 0) ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
#if !defined ( _OPENMP )
nthreads = 1 ;
#endif
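    // heuristic: require roughly 64K entries per thread; otherwise reduce
    // the thread count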
if (nthreads > 1)
{
nthreads = GB_IMIN (nthreads, n / (64 * 1024)) ;
nthreads = GB_IMAX (nthreads, 1) ;
}
//--------------------------------------------------------------------------
// count = cumsum ([0 count[0:n-1]]) ;
//--------------------------------------------------------------------------
if (kresult == NULL)
{
if (nthreads <= 2)
{
//------------------------------------------------------------------
// cumsum with a single thread (nthreads <= 2: not worth parallelizing)
//------------------------------------------------------------------
int64_t s = 0 ;
for (int64_t i = 0 ; i < n ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
count [n] = s ;
}
else
{
//------------------------------------------------------------------
// cumsum with multiple threads
//------------------------------------------------------------------
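            // two-pass parallel scan: pass 1 computes each task's partial sum
            // into ws [tid]; pass 2 rescans, offsetting each task by the sum
            // of ws [0:tid-1]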
// allocate workspace
GB_WERK_DECLARE (ws, int64_t) ;
GB_WERK_PUSH (ws, nthreads, int64_t) ;
if (ws == NULL)
{
// out of memory; use a single thread instead
GB_cumsum (count, n, NULL, 1, NULL) ;
return ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each task sums up its own part
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t s = 0 ;
for (int64_t i = istart ; i < iend ; i++)
{
s += count [i] ;
}
ws [tid] = s ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each task computes the cumsum of its own part
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t s = 0 ;
for (int i = 0 ; i < tid ; i++)
{
s += ws [i] ;
}
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
if (iend == n)
{
count [n] = s ;
}
}
// free workspace
GB_WERK_POP (ws, int64_t) ;
}
}
else
{
if (nthreads <= 2)
{
//------------------------------------------------------------------
// cumsum with a single thread, also compute k (nthreads <= 2)
//------------------------------------------------------------------
int64_t k = 0 ;
int64_t s = 0 ;
for (int64_t i = 0 ; i < n ; i++)
{
int64_t c = count [i] ;
if (c != 0) k++ ;
count [i] = s ;
s += c ;
}
count [n] = s ;
(*kresult) = k ;
}
else
{
//------------------------------------------------------------------
// cumsum with multiple threads, also compute k
//------------------------------------------------------------------
// allocate workspace
GB_WERK_DECLARE (ws, int64_t) ;
GB_WERK_DECLARE (wk, int64_t) ;
GB_WERK_PUSH (ws, nthreads, int64_t) ;
GB_WERK_PUSH (wk, nthreads, int64_t) ;
if (ws == NULL || wk == NULL)
{
// out of memory; use a single thread instead
GB_WERK_POP (wk, int64_t) ;
GB_WERK_POP (ws, int64_t) ;
GB_cumsum (count, n, kresult, 1, NULL) ;
return ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each task sums up its own part
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t k = 0 ;
int64_t s = 0 ;
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
if (c != 0) k++ ;
s += c ;
}
ws [tid] = s ;
wk [tid] = k ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each task computes the cumsum of its own part
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t s = 0 ;
for (int i = 0 ; i < tid ; i++)
{
s += ws [i] ;
}
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
if (iend == n)
{
count [n] = s ;
}
}
int64_t k = 0 ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
k += wk [tid] ;
}
(*kresult) = k ;
// free workspace
GB_WERK_POP (wk, int64_t) ;
GB_WERK_POP (ws, int64_t) ;
}
}
}
|
visual-effects.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS U U AAA L %
% V V I SS U U A A L %
% V V I SSS U U AAAAA L %
% V V I SS U U A A L %
% V IIIII SSSSS UUU A A LLLLL %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT SSSSS %
% E F F E C T SS %
% EEE FFF FFF EEE C T SSS %
% E F F E C T SS %
% EEEEE F F EEEEE CCCC T SSSSS %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
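%  A minimal usage sketch (illustration only; image and exception are assumed
%  to be valid):
%
%    Image *noisy = AddNoiseImage(image,GaussianNoise,1.0,exception);
%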
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
CacheView
*image_view,
*noise_view;
Image
*noise_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
/*
Add noise in each row.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
if ((traits == UndefinedPixelTrait) ||
(noise_traits == UndefinedPixelTrait))
continue;
if ((noise_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(noise_image,channel,p[i],q);
continue;
}
SetPixelChannel(noise_image,channel,ClampToQuantum(
GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
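%  Each output channel is blended twice with the pixel's intensity extremes:
%  first 0.5*(channel+factor*min(r,g,b)), then 0.5*(result+factor*max(r,g,b)),
%  where min and max range over the pixel's red, green, and blue values.
%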
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
CacheView
*image_view,
*shift_view;
Image
*shift_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate blue shift image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
shift_image=CloneImage(image,0,0,MagickTrue,exception);
if (shift_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
{
shift_image=DestroyImage(shift_image);
return((Image *) NULL);
}
/*
Blue-shift DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,shift_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
Quantum
quantum;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
quantum=GetPixelRed(image,p);
if (GetPixelGreen(image,p) < quantum)
quantum=GetPixelGreen(image,p);
if (GetPixelBlue(image,p) < quantum)
quantum=GetPixelBlue(image,p);
pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
quantum=GetPixelRed(image,p);
if (GetPixelGreen(image,p) > quantum)
quantum=GetPixelGreen(image,p);
if (GetPixelBlue(image,p) > quantum)
quantum=GetPixelBlue(image,p);
pixel.red=0.5*(pixel.red+factor*quantum);
pixel.green=0.5*(pixel.green+factor*quantum);
pixel.blue=0.5*(pixel.blue+factor*quantum);
SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(shift_image);
}
sync=SyncCacheViewAuthenticPixels(shift_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
shift_view=DestroyCacheView(shift_view);
if (status == MagickFalse)
shift_image=DestroyImage(shift_image);
return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*charcoal_image,
*edge_image;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
return((Image *) NULL);
edge_image->alpha_trait=UndefinedPixelTrait;
charcoal_image=(Image *) NULL;
status=ClampImage(edge_image,exception);
if (status != MagickFalse)
charcoal_image=BlurImage(edge_image,radius,sigma,exception);
edge_image=DestroyImage(edge_image);
if (charcoal_image == (Image *) NULL)
return((Image *) NULL);
status=NormalizeImage(charcoal_image,exception);
if (status != MagickFalse)
status=NegateImage(charcoal_image,MagickFalse,exception);
if (status != MagickFalse)
status=GrayscaleImage(charcoal_image,image->intensity,exception);
if (status == MagickFalse)
charcoal_image=DestroyImage(charcoal_image);
return(charcoal_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
#define Colorize(pixel,blend_percentage,colorize) \
(((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)
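/*
  For example, Colorize(pixel,50.0,colorize) yields the midpoint of pixel and
  colorize; a blend percentage of 0 leaves the pixel unchanged and 100
  replaces it with the fill color.
*/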
CacheView
*image_view;
GeometryInfo
geometry_info;
Image
*colorize_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickStatusType
flags;
PixelInfo
blend_percentage;
ssize_t
y;
/*
Allocate colorized image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
colorize_image=CloneImage(image,0,0,MagickTrue,exception);
if (colorize_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
{
colorize_image=DestroyImage(colorize_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
(IsPixelInfoGray(colorize) != MagickFalse))
(void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
(colorize->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
if (blend == (const char *) NULL)
return(colorize_image);
GetPixelInfo(colorize_image,&blend_percentage);
flags=ParseGeometry(blend,&geometry_info);
blend_percentage.red=geometry_info.rho;
blend_percentage.green=geometry_info.rho;
blend_percentage.blue=geometry_info.rho;
blend_percentage.black=geometry_info.rho;
blend_percentage.alpha=(MagickRealType) TransparentAlpha;
if ((flags & SigmaValue) != 0)
blend_percentage.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
blend_percentage.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
blend_percentage.alpha=geometry_info.psi;
if (blend_percentage.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
blend_percentage.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
blend_percentage.alpha=geometry_info.chi;
}
/*
Colorize DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
for (y=0; y < (ssize_t) colorize_image->rows; y++)
{
MagickBooleanType
sync;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) colorize_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
{
PixelTrait traits = GetPixelChannelTraits(colorize_image,
(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
}
q+=GetPixelChannels(colorize_image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorizeImageTag,progress,
colorize_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
colorize_image=DestroyImage(colorize_image);
return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
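%  For example, a 3x3 matrix that swaps the red and blue channels (rows are
%  the output red, green, and blue; unspecified rows and columns keep their
%  identity defaults):
%
%    0 0 1
%    0 1 0
%    1 0 0
%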
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
   that should be provided in "matrix.c"
   (ASIDE: actually distorts should do this too but currently don't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"
CacheView
*color_view,
*image_view;
double
ColorMatrix[6][6] =
{
{ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
};
Image
*color_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
u,
v,
y;
/*
Map the given color_matrix into a 6x6 RGBKA matrix and a constant.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
i=0;
for (v=0; v < (ssize_t) color_matrix->height; v++)
for (u=0; u < (ssize_t) color_matrix->width; u++)
{
if ((v < 6) && (u < 6))
ColorMatrix[v][u]=color_matrix->values[i];
i++;
}
/*
Initialize color image.
*/
color_image=CloneImage(image,0,0,MagickTrue,exception);
if (color_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
{
color_image=DestroyImage(color_image);
return((Image *) NULL);
}
if (image->debug != MagickFalse)
{
char
format[MagickPathExtent],
*message;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" ColorMatrix image with color matrix:");
message=AcquireString("");
for (v=0; v < 6; v++)
{
*message='\0';
(void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < 6; u++)
{
(void) FormatLocaleString(format,MagickPathExtent,"%+f ",
ColorMatrix[v][u]);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
/*
Apply the ColorMatrix to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,color_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
h;
size_t
height;
GetPixelInfoPixel(image,p,&pixel);
height=color_matrix->height > 6 ? 6UL : color_matrix->height;
for (h=0; h < (ssize_t) height; h++)
{
double
sum;
sum=ColorMatrix[h][0]*GetPixelRed(image,p)+ColorMatrix[h][1]*
GetPixelGreen(image,p)+ColorMatrix[h][2]*GetPixelBlue(image,p);
if (image->colorspace == CMYKColorspace)
sum+=ColorMatrix[h][3]*GetPixelBlack(image,p);
if (image->alpha_trait != UndefinedPixelTrait)
sum+=ColorMatrix[h][4]*GetPixelAlpha(image,p);
sum+=QuantumRange*ColorMatrix[h][5];
switch (h)
{
case 0: pixel.red=sum; break;
case 1: pixel.green=sum; break;
case 2: pixel.blue=sum; break;
case 3: pixel.black=sum; break;
case 4: pixel.alpha=sum; break;
default: break;
}
}
SetPixelViaPixelInfo(color_image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(color_image);
}
if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
color_view=DestroyCacheView(color_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
color_image=DestroyImage(color_image);
return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
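%  Within the inscribed ellipse, each destination pixel samples the source at
%  a radius scaled by sin(MagickPI*r/(2*radius))^(-amount), where r is the
%  scaled distance from the center: a positive amount yields a factor above
%  one, pulling outlying pixels toward the center.
%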
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"
CacheView
*canvas_view,
*implode_view,
*interpolate_view;
double
radius;
Image
*canvas_image,
*implode_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize implode image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (implode_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
implode_image=DestroyImage(implode_image);
return((Image *) NULL);
}
/*
Compute scaling factor.
*/
scale.x=1.0;
scale.y=1.0;
center.x=0.5*canvas_image->columns;
center.y=0.5*canvas_image->rows;
radius=center.x;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns*PerceptibleReciprocal((double)
canvas_image->rows);
else
if (canvas_image->columns < canvas_image->rows)
{
scale.x=(double) canvas_image->rows*PerceptibleReciprocal((double)
canvas_image->columns);
radius=center.y;
}
/*
Implode image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
ssize_t
i;
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(implode_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(implode_image,channel,p[i],q);
}
else
{
double
factor;
/*
Implode the pixel.
*/
factor=1.0;
if (distance > 0.0)
factor=pow(sin(MagickPI*sqrt((double) distance)*
  PerceptibleReciprocal(radius)/2),-amount);
status=InterpolatePixelChannels(canvas_image,interpolate_view,
  implode_image,method,(double) (factor*delta.x*
  PerceptibleReciprocal(scale.x)+center.x),(double) (factor*delta.y*
  PerceptibleReciprocal(scale.y)+center.y),q,exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(implode_image);
}
if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
implode_view=DestroyCacheView(implode_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
implode_image=DestroyImage(implode_image);
return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between images to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
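%  In-between frame n blends the current and next source frames with weights
%  alpha=1-beta and beta=(n+1)/(number_frames+1), applied to both the frame
%  dimensions and the pixel values.
%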
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"
double
alpha,
beta;
Image
*morph_image,
*morph_images;
MagickBooleanType
status;
MagickOffsetType
scene;
const Image
*next;
ssize_t
n;
ssize_t
y;
/*
Clone first frame in sequence.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
morph_images=CloneImage(image,0,0,MagickTrue,exception);
if (morph_images == (Image *) NULL)
return((Image *) NULL);
if (GetNextImageInList(image) == (Image *) NULL)
{
/*
Morph single image.
*/
status=MagickTrue;
for (n=1; n < (ssize_t) number_frames; n++)
{
morph_image=CloneImage(image,0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
number_frames);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(GetFirstImageInList(morph_images));
}
/*
Morph image sequence.
*/
status=MagickTrue;
scene=0;
next=image;
for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
{
for (n=0; n < (ssize_t) number_frames; n++)
{
CacheView
*image_view,
*morph_view;
beta=(double) (n+1.0)/(double) (number_frames+1.0);
alpha=1.0-beta;
morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
GetNextImageInList(next)->rows+0.5),next->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
status=SetImageStorageClass(morph_image,DirectClass,exception);
if (status == MagickFalse)
{
morph_image=DestroyImage(morph_image);
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
morph_images->rows,GetNextImageInList(next)->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
image_view=AcquireVirtualCacheView(morph_image,exception);
morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
for (y=0; y < (ssize_t) morph_images->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) morph_images->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(morph_image,i);
PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
if ((traits == UndefinedPixelTrait) ||
(morph_traits == UndefinedPixelTrait))
continue;
if ((morph_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morph_image,channel,p[i],q);
continue;
}
SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
}
p+=GetPixelChannels(morph_image);
q+=GetPixelChannels(morph_images);
}
sync=SyncCacheViewAuthenticPixels(morph_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
morph_view=DestroyCacheView(morph_view);
image_view=DestroyCacheView(image_view);
morph_image=DestroyImage(morph_image);
}
if (n < (ssize_t) number_frames)
break;
/*
Clone last frame in sequence.
*/
morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,scene,
GetImageListLength(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
scene++;
}
if (GetNextImageInList(next) != (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
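%  The noise amplitude at each recursion level is QuantumRange/(2*attenuate),
%  and attenuate increases by one for every level of recursion, so deeper
%  subdivisions perturb the averaged pixels less.
%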
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
const double pixel,const double noise)
{
MagickRealType
plasma;
plasma=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
return(ClampToQuantum(plasma));
}
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
ExceptionInfo *exception)
{
double
plasma;
MagickStatusType
status;
const Quantum
*magick_restrict u,
*magick_restrict v;
Quantum
*magick_restrict q;
ssize_t
i;
ssize_t
x,
x_mid,
y,
y_mid;
if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
(fabs(segment->y2-segment->y1) < MagickEpsilon))
return(MagickTrue);
if (depth != 0)
{
SegmentInfo
local_info;
/*
Divide the area into quadrants and recurse.
*/
depth--;
attenuate++;
x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
local_info=(*segment);
local_info.x2=(double) x_mid;
local_info.y2=(double) y_mid;
status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.y1=(double) y_mid;
local_info.x2=(double) x_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.x1=(double) x_mid;
local_info.y2=(double) y_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
local_info=(*segment);
local_info.x1=(double) x_mid;
local_info.y1=(double) y_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth,exception);
return(status == 0 ? MagickFalse : MagickTrue);
}
x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
(fabs(segment->x2-x_mid) < MagickEpsilon) &&
(fabs(segment->y1-y_mid) < MagickEpsilon) &&
(fabs(segment->y2-y_mid) < MagickEpsilon))
return(MagickFalse);
/*
Average pixels and apply plasma.
*/
status=MagickTrue;
plasma=(double) QuantumRange/(2.0*attenuate);
if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
(fabs(segment->x2-x_mid) >= MagickEpsilon))
{
/*
Left pixel.
*/
x=CastDoubleToLong(ceil(segment->x1-0.5));
u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
segment->y1-0.5)),1,1,exception);
v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
segment->y2-0.5)),1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
{
/*
Right pixel.
*/
x=CastDoubleToLong(ceil(segment->x2-0.5));
u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
segment->y1-0.5)),1,1,exception);
v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
segment->y2-0.5)),1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
}
if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
(fabs(segment->y2-y_mid) >= MagickEpsilon))
{
if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
(fabs(segment->y2-y_mid) >= MagickEpsilon))
{
/*
Bottom pixel.
*/
y=CastDoubleToLong(ceil(segment->y2-0.5));
u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
segment->x1-0.5)),y,1,1,exception);
v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
segment->x2-0.5)),y,1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
{
/*
Top pixel.
*/
y=CastDoubleToLong(ceil(segment->y1-0.5));
u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
segment->x1-0.5)),y,1,1,exception);
v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
segment->x2-0.5)),y,1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
}
if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
(fabs(segment->y1-segment->y2) >= MagickEpsilon))
{
/*
Middle pixel.
*/
x=CastDoubleToLong(ceil(segment->x1-0.5));
y=CastDoubleToLong(ceil(segment->y1-0.5));
u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
x=CastDoubleToLong(ceil(segment->x2-0.5));
y=CastDoubleToLong(ceil(segment->y2-0.5));
v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
return(MagickTrue);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
}
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
if ((fabs(segment->x2-segment->x1) < 3.0) &&
(fabs(segment->y2-segment->y1) < 3.0))
return(status == 0 ? MagickFalse : MagickTrue);
return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
const SegmentInfo *segment,size_t attenuate,size_t depth,
ExceptionInfo *exception)
{
CacheView
*image_view,
*u_view,
*v_view;
MagickBooleanType
status;
RandomInfo
*random_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
u_view=AcquireVirtualCacheView(image,exception);
v_view=AcquireVirtualCacheView(image,exception);
random_info=AcquireRandomInfo();
status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
attenuate,depth,exception);
random_info=DestroyRandomInfo(random_info);
v_view=DestroyCacheView(v_view);
u_view=DestroyCacheView(u_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
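/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; the caption text, 5-degree angle,
and interpolation method are illustrative. The caller owns the returned
image and must eventually destroy it.

DrawInfo
*draw_info;
Image
*polaroid_image;
draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
polaroid_image=PolaroidImage(image,draw_info,"a caption",5.0,
BilinearInterpolatePixel,exception);
draw_info=DestroyDrawInfo(draw_info);
*/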
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
const char *caption,const double angle,const PixelInterpolateMethod method,
ExceptionInfo *exception)
{
Image
*bend_image,
*caption_image,
*flop_image,
*picture_image,
*polaroid_image,
*rotate_image,
*trim_image;
size_t
height;
ssize_t
quantum;
/*
Simulate a Polaroid picture.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
image->rows)/25.0,10.0);
height=image->rows+2*quantum;
caption_image=(Image *) NULL;
if (caption != (const char *) NULL)
{
char
*text;
/*
Generate caption image.
*/
caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
if (caption_image == (Image *) NULL)
return((Image *) NULL);
text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
exception);
if (text != (char *) NULL)
{
char
geometry[MagickPathExtent];
DrawInfo
*annotate_info;
MagickBooleanType
status;
ssize_t
count;
TypeMetric
metrics;
annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
(void) CloneString(&annotate_info->text,text);
count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
&metrics,&text,exception);
status=SetImageExtent(caption_image,image->columns,(size_t)
((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
if (status == MagickFalse)
caption_image=DestroyImage(caption_image);
else
{
caption_image->background_color=image->border_color;
(void) SetImageBackgroundColor(caption_image,exception);
(void) CloneString(&annotate_info->text,text);
(void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
metrics.ascent);
if (annotate_info->gravity == UndefinedGravity)
(void) CloneString(&annotate_info->geometry,AcquireString(
geometry));
(void) AnnotateImage(caption_image,annotate_info,exception);
height+=caption_image->rows;
}
annotate_info=DestroyDrawInfo(annotate_info);
text=DestroyString(text);
}
}
picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
exception);
if (picture_image == (Image *) NULL)
{
if (caption_image != (Image *) NULL)
caption_image=DestroyImage(caption_image);
return((Image *) NULL);
}
picture_image->background_color=image->border_color;
(void) SetImageBackgroundColor(picture_image,exception);
(void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
quantum,exception);
if (caption_image != (Image *) NULL)
{
(void) CompositeImage(picture_image,caption_image,OverCompositeOp,
MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
caption_image=DestroyImage(caption_image);
}
(void) QueryColorCompliance("none",AllCompliance,
&picture_image->background_color,exception);
(void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
rotate_image=RotateImage(picture_image,90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
picture_image->columns,method,exception);
picture_image=DestroyImage(picture_image);
if (bend_image == (Image *) NULL)
return((Image *) NULL);
picture_image=bend_image;
rotate_image=RotateImage(picture_image,-90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
picture_image->background_color=image->background_color;
polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
exception);
if (polaroid_image == (Image *) NULL)
{
picture_image=DestroyImage(picture_image);
return(picture_image);
}
flop_image=FlopImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (flop_image == (Image *) NULL)
{
picture_image=DestroyImage(picture_image);
return(picture_image);
}
polaroid_image=flop_image;
(void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
picture_image=DestroyImage(picture_image);
(void) QueryColorCompliance("none",AllCompliance,
&polaroid_image->background_color,exception);
rotate_image=RotateImage(polaroid_image,angle,exception);
polaroid_image=DestroyImage(polaroid_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=rotate_image;
trim_image=TrimImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (trim_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=trim_image;
return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
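/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; 80% of QuantumRange follows the
starting point suggested above.

Image
*sepia_image;
sepia_image=SepiaToneImage(image,0.80*(double) QuantumRange,exception);
*/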
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
CacheView
*image_view,
*sepia_view;
Image
*sepia_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize sepia-toned image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sepia_image=CloneImage(image,0,0,MagickTrue,exception);
if (sepia_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
{
sepia_image=DestroyImage(sepia_image);
return((Image *) NULL);
}
/*
Tone each row of the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sepia_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
intensity,
tone;
intensity=GetPixelIntensity(image,p);
tone=intensity > threshold ? (double) QuantumRange : intensity+
(double) QuantumRange-threshold;
SetPixelRed(sepia_image,ClampToQuantum(tone),q);
tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
intensity+(double) QuantumRange-7.0*threshold/6.0;
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
tone=threshold/7.0;
if ((double) GetPixelGreen(image,q) < tone)
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
if ((double) GetPixelBlue(image,q) < tone)
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(sepia_image);
}
if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sepia_view=DestroyCacheView(sepia_view);
image_view=DestroyCacheView(image_view);
(void) NormalizeImage(sepia_image,exception);
(void) ContrastImage(sepia_image,MagickTrue,exception);
if (status == MagickFalse)
sepia_image=DestroyImage(sepia_image);
return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
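/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; the 80% alpha, sigma of 2.0, and 5x5
offset are illustrative. The result is the shadow alone; composite the
original image over it for the usual drop-shadow look.

Image
*shadow_image;
shadow_image=ShadowImage(image,80.0,2.0,5,5,exception);
*/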
MagickExport Image *ShadowImage(const Image *image,const double alpha,
const double sigma,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
CacheView
*image_view;
ChannelType
channel_mask;
Image
*border_image,
*clone_image,
*shadow_image;
MagickBooleanType
status;
PixelInfo
background_color;
RectangleInfo
border_info;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(clone_image,sRGBColorspace,exception);
(void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
exception);
border_info.width=(size_t) floor(2.0*sigma+0.5);
border_info.height=(size_t) floor(2.0*sigma+0.5);
border_info.x=0;
border_info.y=0;
(void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
exception);
clone_image->alpha_trait=BlendPixelTrait;
border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
clone_image=DestroyImage(clone_image);
if (border_image == (Image *) NULL)
return((Image *) NULL);
if (border_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
/*
Shadow image.
*/
status=MagickTrue;
background_color=border_image->background_color;
background_color.alpha_trait=BlendPixelTrait;
image_view=AcquireAuthenticCacheView(border_image,exception);
for (y=0; y < (ssize_t) border_image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) border_image->columns; x++)
{
if (border_image->alpha_trait != UndefinedPixelTrait)
background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
SetPixelViaPixelInfo(border_image,&background_color,q);
q+=GetPixelChannels(border_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
border_image=DestroyImage(border_image);
return((Image *) NULL);
}
channel_mask=SetImageChannelMask(border_image,AlphaChannel);
shadow_image=BlurImage(border_image,0.0,sigma,exception);
border_image=DestroyImage(border_image);
if (shadow_image == (Image *) NULL)
return((Image *) NULL);
(void) SetPixelChannelMask(shadow_image,channel_mask);
if (shadow_image->page.width == 0)
shadow_image->page.width=shadow_image->columns;
if (shadow_image->page.height == 0)
shadow_image->page.height=shadow_image->rows;
shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0,
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch strokes.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
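/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; a radius of 0 lets SketchImage()
choose one, and the 45-degree stroke angle is illustrative.

Image
*sketch_image;
sketch_image=SketchImage(image,0.0,1.0,45.0,exception);
*/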
MagickExport Image *SketchImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
CacheView
*random_view;
Image
*blend_image,
*blur_image,
*dodge_image,
*random_image,
*sketch_image;
MagickBooleanType
status;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Sketch image.
*/
random_image=CloneImage(image,image->columns << 1,image->rows << 1,
MagickTrue,exception);
if (random_image == (Image *) NULL)
return((Image *) NULL);
status=MagickTrue;
random_info=AcquireRandomInfoThreadSet();
random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) random_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) random_image->columns; x++)
{
double
value;
ssize_t
i;
value=GetPseudoRandomValue(random_info[id]);
for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=ClampToQuantum(QuantumRange*value);
}
q+=GetPixelChannels(random_image);
}
if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
status=MagickFalse;
}
random_view=DestroyCacheView(random_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
{
random_image=DestroyImage(random_image);
return(random_image);
}
blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
random_image=DestroyImage(random_image);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
dodge_image=EdgeImage(blur_image,radius,exception);
blur_image=DestroyImage(blur_image);
if (dodge_image == (Image *) NULL)
return((Image *) NULL);
status=ClampImage(dodge_image,exception);
if (status != MagickFalse)
status=NormalizeImage(dodge_image,exception);
if (status != MagickFalse)
status=NegateImage(dodge_image,MagickFalse,exception);
if (status != MagickFalse)
status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
sketch_image=CloneImage(image,0,0,MagickTrue,exception);
if (sketch_image == (Image *) NULL)
{
dodge_image=DestroyImage(dodge_image);
return((Image *) NULL);
}
(void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
MagickTrue,0,0,exception);
dodge_image=DestroyImage(dodge_image);
blend_image=CloneImage(image,0,0,MagickTrue,exception);
if (blend_image == (Image *) NULL)
{
sketch_image=DestroyImage(sketch_image);
return((Image *) NULL);
}
if (blend_image->alpha_trait != BlendPixelTrait)
(void) SetImageAlpha(blend_image,TransparentAlpha,exception);
(void) SetImageArtifact(blend_image,"compose:args","20x80");
(void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
0,0,exception);
blend_image=DestroyImage(blend_image);
return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
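/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; thresholding at 50% of QuantumRange
is illustrative.

MagickBooleanType
status;
status=SolarizeImage(image,0.50*(double) QuantumRange,exception);
*/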
MagickExport MagickBooleanType SolarizeImage(Image *image,
const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class == PseudoClass)
{
ssize_t
i;
/*
Solarize colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((double) image->colormap[i].red > threshold)
image->colormap[i].red=QuantumRange-image->colormap[i].red;
if ((double) image->colormap[i].green > threshold)
image->colormap[i].green=QuantumRange-image->colormap[i].green;
if ((double) image->colormap[i].blue > threshold)
image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
}
return(SyncImage(image,exception));
}
/*
Solarize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((double) q[i] > threshold)
q[i]=QuantumRange-q[i];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of the image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
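/*
A hypothetical usage sketch, not part of the library source: it assumes
"image", a previously read watermark Image "watermark", and "exception"
already exist; an offset of 0 starts hiding bits at the first pixel.

Image
*stegano_image;
image->offset=0;
stegano_image=SteganoImage(image,watermark,exception);
*/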
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
| (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
CacheView
*stegano_view,
*watermark_view;
Image
*stegano_image;
int
c;
MagickBooleanType
status;
PixelInfo
pixel;
Quantum
*q;
ssize_t
x;
size_t
depth,
one;
ssize_t
i,
j,
k,
y;
/*
Initialize steganographic image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(watermark != (const Image *) NULL);
assert(watermark->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
one=1UL;
stegano_image=CloneImage(image,0,0,MagickTrue,exception);
if (stegano_image == (Image *) NULL)
return((Image *) NULL);
stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
{
stegano_image=DestroyImage(stegano_image);
return((Image *) NULL);
}
/*
Hide watermark in low-order bits of image.
*/
c=0;
i=0;
j=0;
depth=stegano_image->depth;
k=stegano_image->offset;
status=MagickTrue;
watermark_view=AcquireVirtualCacheView(watermark,exception);
stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
{
for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
{
for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
{
ssize_t
offset;
(void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
exception);
offset=k/(ssize_t) stegano_image->columns;
if (offset >= (ssize_t) stegano_image->rows)
break;
q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
exception);
if (q == (Quantum *) NULL)
break;
switch (c)
{
case 0:
{
SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
case 1:
{
SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
case 2:
{
SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
}
if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
break;
c++;
if (c == 3)
c=0;
k++;
if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
k=0;
if (k == stegano_image->offset)
j++;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
(depth-i),depth);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
stegano_view=DestroyCacheView(stegano_view);
watermark_view=DestroyCacheView(watermark_view);
if (status == MagickFalse)
stegano_image=DestroyImage(stegano_image);
return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
*/
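/*
A hypothetical usage sketch, not part of the library source: it assumes
"left_image", "right_image", and "exception" already exist; zero offsets
reproduce the plain StereoImage() behavior.

Image
*stereo_image;
stereo_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
*/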
MagickExport Image *StereoImage(const Image *left_image,
const Image *right_image,ExceptionInfo *exception)
{
return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
const Image
*image;
Image
*stereo_image;
MagickBooleanType
status;
ssize_t
y;
assert(left_image != (const Image *) NULL);
assert(left_image->signature == MagickCoreSignature);
if (left_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
left_image->filename);
assert(right_image != (const Image *) NULL);
assert(right_image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=left_image;
if ((left_image->columns != right_image->columns) ||
(left_image->rows != right_image->rows))
ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
/*
Initialize stereo image attributes.
*/
stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
MagickTrue,exception);
if (stereo_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
{
stereo_image=DestroyImage(stereo_image);
return((Image *) NULL);
}
(void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
/*
Copy left image to red channel and right image to green and blue channels.
*/
status=MagickTrue;
for (y=0; y < (ssize_t) stereo_image->rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
Quantum
*magick_restrict r;
p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
exception);
q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
(r == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) stereo_image->columns; x++)
{
SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
GetPixelAlpha(right_image,q))/2,r);
p+=GetPixelChannels(left_image);
q+=GetPixelChannels(right_image);
r+=GetPixelChannels(stereo_image);
}
if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
break;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
stereo_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (status == MagickFalse)
stereo_image=DestroyImage(stereo_image);
return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
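/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; the 180-degree sweep and the
interpolation method are illustrative.

Image
*swirl_image;
swirl_image=SwirlImage(image,180.0,BilinearInterpolatePixel,exception);
*/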
MagickExport Image *SwirlImage(const Image *image,double degrees,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
CacheView
*canvas_view,
*interpolate_view,
*swirl_view;
double
radius;
Image
*canvas_image,
*swirl_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (swirl_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
(void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
/*
Compute scaling factor.
*/
center.x=(double) canvas_image->columns/2.0;
center.y=(double) canvas_image->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
else
if (canvas_image->columns < canvas_image->rows)
scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(image,exception);
swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(swirl_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(swirl_image,channel,p[i],q);
}
}
else
{
double
cosine,
factor,
sine;
/*
Swirl the pixel.
*/
factor=1.0-sqrt((double) distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
status=InterpolatePixelChannels(canvas_image,interpolate_view,
swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
(double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(swirl_image);
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5)))).
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: a geometry string giving the blend percentage(s); the rho, sigma,
% xi, and psi values weight the red, green, blue, and alpha components of
% the tint color.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
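/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; a 40% blend of a red tint is
illustrative.

Image
*tint_image;
PixelInfo
tint;
(void) QueryColorCompliance("red",AllCompliance,&tint,exception);
tint_image=TintImage(image,"40",&tint,exception);
*/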
MagickExport Image *TintImage(const Image *image,const char *blend,
const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
CacheView
*image_view,
*tint_view;
double
intensity;
GeometryInfo
geometry_info;
Image
*tint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
color_vector;
MagickStatusType
flags;
ssize_t
y;
/*
Allocate tint image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
tint_image=CloneImage(image,0,0,MagickTrue,exception);
if (tint_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
{
tint_image=DestroyImage(tint_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsPixelInfoGray(tint) == MagickFalse))
(void) SetImageColorspace(tint_image,sRGBColorspace,exception);
if (blend == (const char *) NULL)
return(tint_image);
/*
Determine RGB values of the color.
*/
GetPixelInfo(image,&color_vector);
flags=ParseGeometry(blend,&geometry_info);
color_vector.red=geometry_info.rho;
color_vector.green=geometry_info.rho;
color_vector.blue=geometry_info.rho;
color_vector.alpha=(MagickRealType) OpaqueAlpha;
if ((flags & SigmaValue) != 0)
color_vector.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
color_vector.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
color_vector.alpha=geometry_info.psi;
if (image->colorspace == CMYKColorspace)
{
color_vector.black=geometry_info.rho;
if ((flags & PsiValue) != 0)
color_vector.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
color_vector.alpha=geometry_info.chi;
}
intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
/*
Tint image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,tint_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
double
weight;
GetPixelInfo(image,&pixel);
weight=QuantumScale*GetPixelRed(image,p)-0.5;
pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelGreen(image,p)-0.5;
pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelBlue(image,p)-0.5;
pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelBlack(image,p)-0.5;
pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
(1.0-(4.0*(weight*weight)));
pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
SetPixelViaPixelInfo(tint_image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(tint_image);
}
if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
tint_view=DestroyCacheView(tint_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
tint_image=DestroyImage(tint_image);
return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
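/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; a sigma of 10.0 with the ellipse
inset 5 pixels on each axis is illustrative.

Image
*vignette_image;
vignette_image=VignetteImage(image,0.0,10.0,5,5,exception);
*/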
MagickExport Image *VignetteImage(const Image *image,const double radius,
const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
char
ellipse[MagickPathExtent];
DrawInfo
*draw_info;
Image
*canvas,
*blur_image,
*oval_image,
*vignette_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas=CloneImage(image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
canvas->alpha_trait=BlendPixelTrait;
oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
exception);
if (oval_image == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
(void) QueryColorCompliance("#000000",AllCompliance,
&oval_image->background_color,exception);
(void) SetImageBackgroundColor(oval_image,exception);
draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
(void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
exception);
(void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
"0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
image->rows/2.0-y);
draw_info->primitive=AcquireString(ellipse);
(void) DrawImage(oval_image,draw_info,exception);
draw_info=DestroyDrawInfo(draw_info);
blur_image=BlurImage(oval_image,radius,sigma,exception);
oval_image=DestroyImage(oval_image);
if (blur_image == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
blur_image->alpha_trait=UndefinedPixelTrait;
(void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
0,0,exception);
blur_image=DestroyImage(blur_image);
vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
canvas=DestroyImage(canvas);
if (vignette_image != (Image *) NULL)
(void) TransformImageColorspace(vignette_image,image->colorspace,exception);
return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength are specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
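/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; an amplitude of 25 pixels and a
wavelength of 150 pixels are illustrative.

Image
*wave_image;
wave_image=WaveImage(image,25.0,150.0,BilinearInterpolatePixel,exception);
*/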
MagickExport Image *WaveImage(const Image *image,const double amplitude,
const double wave_length,const PixelInterpolateMethod method,
ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
CacheView
*canvas_image_view,
*wave_view;
float
*sine_map;
Image
*canvas_image,
*wave_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
y;
/*
Initialize wave image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
(canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
if (wave_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
wave_image=DestroyImage(wave_image);
return((Image *) NULL);
}
/*
Allocate sine map.
*/
sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
sizeof(*sine_map));
if (sine_map == (float *) NULL)
{
canvas_image=DestroyImage(canvas_image);
wave_image=DestroyImage(wave_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) wave_image->columns; i++)
sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
((2.0*MagickPI*i)*PerceptibleReciprocal(wave_length)));
/*
Wave image.
*/
status=MagickTrue;
progress=0;
canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
wave_view=AcquireAuthenticCacheView(wave_image,exception);
(void) SetCacheViewVirtualPixelMethod(canvas_image_view,
BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
for (y=0; y < (ssize_t) wave_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) wave_image->columns; x++)
{
status=InterpolatePixelChannels(canvas_image,canvas_image_view,
wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
if (status == MagickFalse)
break;
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(wave_image);
}
if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
wave_view=DestroyCacheView(wave_view);
canvas_image_view=DestroyCacheView(canvas_image_view);
canvas_image=DestroyImage(canvas_image);
sine_map=(float *) RelinquishMagickMemory(sine_map);
if (status == MagickFalse)
wave_image=DestroyImage(wave_image);
return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive low-pass and high-pass
% filters, followed by a decimation. This results in a decomposition into
% different scales which can be regarded as different “frequency bands”,
% determined by the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
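/*
A hypothetical usage sketch, not part of the library source: it assumes
"image" and "exception" already exist; a threshold of 5% of QuantumRange
with zero softness is illustrative.

Image
*denoise_image;
denoise_image=WaveletDenoiseImage(image,0.05*(double) QuantumRange,0.0,
exception);
*/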
static inline void HatTransform(const float *magick_restrict pixels,
const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
const float
*magick_restrict p,
*magick_restrict q,
*magick_restrict r;
ssize_t
i;
p=pixels;
q=pixels+scale*stride;
r=pixels+scale*stride;
for (i=0; i < (ssize_t) scale; i++)
{
kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
p+=stride;
q-=stride;
r+=stride;
}
for ( ; i < (ssize_t) (extent-scale); i++)
{
kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
p+=stride;
}
q=p-scale*stride;
r=pixels+stride*(extent-2);
for ( ; i < (ssize_t) extent; i++)
{
kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
p+=stride;
q+=stride;
r-=stride;
}
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
const double threshold,const double softness,ExceptionInfo *exception)
{
CacheView
*image_view,
*noise_view;
float
*kernel,
*pixels;
Image
*noise_image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixels_info;
ssize_t
channel;
static const float
noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
0.0080f, 0.0044f };
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
sizeof(*pixels));
kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
GetOpenMPMaximumThreads()*sizeof(*kernel));
if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
{
if (kernel != (float *) NULL)
kernel=(float *) RelinquishMagickMemory(kernel);
if (pixels_info != (MemoryInfo *) NULL)
pixels_info=RelinquishVirtualMemory(pixels_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(float *) GetVirtualMemoryBlob(pixels_info);
status=MagickTrue;
number_pixels=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
{
ssize_t
i;
size_t
high_pass,
low_pass;
ssize_t
level,
y;
PixelChannel
pixel_channel;
PixelTrait
traits;
if (status == MagickFalse)
continue;
traits=GetPixelChannelTraits(image,(PixelChannel) channel);
if (traits == UndefinedPixelTrait)
continue;
pixel_channel=GetPixelChannelChannel(image,channel);
if ((pixel_channel != RedPixelChannel) &&
(pixel_channel != GreenPixelChannel) &&
(pixel_channel != BluePixelChannel))
continue;
/*
Copy channel from image to wavelet pixel array.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixels[i++]=(float) p[channel];
p+=GetPixelChannels(image);
}
}
/*
Low pass filter outputs are called approximation kernel & high pass
filters are referred to as detail kernels. The detail kernels
have high values in the noisy parts of the signal.
*/
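/*
  The working buffer 'pixels' holds three planes of columns*rows floats:
  plane 0 starts as the raw channel data and ends up holding the sum of the
  thresholded detail coefficients, while planes 1 and 2 alternate as the
  low-pass output of each level -- hence
  low_pass=number_pixels*((level & 0x01)+1) below.  After smoothing, the
  detail kernel for a level is the difference between the previous
  approximation (at offset high_pass) and the new one (at offset low_pass).
*/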
high_pass=0;
for (level=0; level < 5; level++)
{
double
magnitude;
ssize_t
x;
low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
c;
p=kernel+id*image->columns;
q=pixels+y*image->columns;
HatTransform(q+high_pass,1,image->columns,((size_t) 1UL << level),p);
q+=low_pass;
for (c=0; c < (ssize_t) image->columns; c++)
*q++=(*p++);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
r;
p=kernel+id*image->rows;
q=pixels+x+low_pass;
HatTransform(q,image->columns,image->rows,((size_t) 1UL << level),p);
for (r=0; r < (ssize_t) image->rows; r++)
{
*q=(*p++);
q+=image->columns;
}
}
/*
To threshold, each coefficient is compared to a threshold value and
attenuated / shrunk by some factor.
*/
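/*
  For example (illustrative numbers only): with magnitude=0.1 and
  softness=0.2, a detail coefficient of 0.50 shrinks to
  0.50-(0.1-0.02)=0.42, a coefficient of -0.50 grows to -0.42, and a
  coefficient of 0.05, which is below the threshold, is damped to
  0.05*0.2=0.01.
*/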
magnitude=threshold*noise_levels[level];
for (i=0; i < (ssize_t) number_pixels; ++i)
{
pixels[high_pass+i]-=pixels[low_pass+i];
if (pixels[high_pass+i] < -magnitude)
pixels[high_pass+i]+=magnitude-softness*magnitude;
else
if (pixels[high_pass+i] > magnitude)
pixels[high_pass+i]-=magnitude-softness*magnitude;
else
pixels[high_pass+i]*=softness;
if (high_pass != 0)
pixels[i]+=pixels[high_pass+i];
}
high_pass=low_pass;
}
/*
Reconstruct image from the thresholded wavelet kernel.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
offset;
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
offset=GetPixelChannelOffset(noise_image,pixel_channel);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
pixel;
pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
q[offset]=ClampToQuantum(pixel);
i++;
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
channel,GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
kernel=(float *) RelinquishMagickMemory(kernel);
pixels_info=RelinquishVirtualMemory(pixels_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp; it is a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
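///
/// For example, given the expression "foo < bar", the '<' location is
/// recorded here so that, if 'foo' later turns out to name a template, the
/// parser can suggest that a template-id rather than a comparison was
/// intended.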
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
/// C++ AMP-specific
/// check if the given scope is AMP-restricted
bool IsInAMPFunction(Scope *);
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
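///
/// A typical (illustrative) use after parsing a parenthesized construct:
///
///   if (ExpectAndConsume(tok::r_paren))
///     return ExprError();
///
/// where a 'true' return means the expected token was missing and a
/// diagnostic has already been emitted.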
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
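///
/// A typical (hypothetical) use inside a parsing method:
///
///   ParseScope BodyScope(this, Scope::DeclScope);
///   // ... parse the construct ...
///   BodyScope.Exit(); // or let the destructor pop the scope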
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
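///
/// For instance (illustrative only), error recovery for a malformed
/// parenthesized expression might call
///
///   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
///
/// to resynchronize at the ')' without consuming it, while still giving up
/// at the end of the statement.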
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
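///
/// For example (illustrative), in
///
///   struct S { int f() { return x; } int x; };
///
/// the body of 'f' is stashed as lexed tokens and parsed only after 'S' is
/// complete, so the reference to 'x' resolves even though 'x' is declared
/// later.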
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
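///
/// For example (illustrative), in
///
///   struct S { void f(int n = k); static const int k = 3; };
///
/// the default argument 'k' is cached as tokens and parsed once 'S' is
/// completely defined.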
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
// C++AMP
bool CXXAMPFindRestrictionSeq(CachedTokens &Toks, bool ConsumeFinalToken);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro, ParsedAttributes &AttrIntro);
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
ParsedAttributes &AttrIntro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro, ParsedAttributes &AttrIntro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior that this __if_exists or __if_not_exists block
/// should follow.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and it isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// Checks if the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the tricky cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression, \c
/// TPResult::False if this token starts a type-specifier-seq, or \c
/// TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
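// Illustrative only (not part of the original header): callers normally wrap
// these in a tentative-parsing RAII object so the consumed tokens can be
// rewound. Clang's Parser provides TentativeParsingAction with
// Commit()/Revert(); the call site below is a sketch, not code from this file:
//   TentativeParsingAction TPA(*this);
//   TPResult TPR = TryParseSimpleDeclaration(/*AllowForRangeDecl=*/false);
//   TPA.Revert(); // restore the token stream before acting on TPR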
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't support yet, for example, attributes
// that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseHCCQualifiers(ParsedAttributes &Attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute if the language is OpenCL v2.0
/// or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// Parses the opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
// C++AMP
unsigned ParseRestrictionSpecification(Declarator &D,
ParsedAttributes &Attrs,
SourceLocation &DeclEndLoc);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
single_copyprivate.c |
#include <omp.h>
#include <stdio.h>
#define LOOPCOUNT 200
int
check_single_copyprivate (FILE * logFile)
{
int result = 0;
int nr_iterations = 0;
int i;
#pragma omp parallel private(i)
{
for (i = 0; i < LOOPCOUNT; i++)
{
int j;
/*
int thread;
thread=omp_get_thread_num();
*/
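/* copyprivate(j) broadcasts the value of j assigned by the thread that
   executed the single region to the private j of every other thread in
   the team, so each thread should see j == i and result stays 0. */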
#pragma omp single copyprivate(j)
{
nr_iterations++;
j = i;
/*printf("thread %d assigns ,j=%d,i=%d\n",thread,j,i); */
}
/* #pragma omp barrier */
#pragma omp critical
{
/*printf("thread=%d,j=%d,i=%d\n",thread,j,i); */
result = result + j - i;
}
#pragma omp barrier
} /* end of for */
} /* end of parallel */
return (result == 0) && (nr_iterations == LOOPCOUNT);
}
int
crosscheck_single_copyprivate (FILE * logFile)
{
int result = 0;
int nr_iterations = 0;
int i;
#pragma omp parallel private(i)
{
for (i = 0; i < LOOPCOUNT; i++)
{
int j;
/*
int thread;
thread=omp_get_thread_num();
*/
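/* Cross-check: private(j) deliberately omits the copyprivate broadcast,
   so the other threads' private j is never assigned here and result is
   expected to drift away from 0; this verifies that the check above can
   actually detect a failure. */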
#pragma omp single private(j)
{
nr_iterations++;
j = i;
/*printf("thread %d assigns ,j=%d,i=%d\n",thread,j,i); */
}
/* #pragma omp barrier */
#pragma omp critical
{
/*printf("thread=%d,j=%d,i=%d\n",thread,j,i); */
result = result + j - i;
}
#pragma omp barrier
} /* end of for */
} /* end of parallel */
return (result == 0) && (nr_iterations == LOOPCOUNT);
}
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal */
inline void manage_multi_threading(Action action, int *v) {
static EIGEN_UNUSED int m_maxThreads = -1;
if (action == SetAction) {
eigen_internal_assert(v != 0);
m_maxThreads = *v;
} else if (action == GetAction) {
eigen_internal_assert(v != 0);
#ifdef EIGEN_HAS_OPENMP
if (m_maxThreads > 0)
*v = m_maxThreads;
else
*v = omp_get_max_threads();
#else
*v = 1;
#endif
} else {
eigen_internal_assert(false);
}
}
} // namespace internal
/** Must be called first when calling Eigen from multiple threads */
inline void initParallel() {
int nbt;
internal::manage_multi_threading(GetAction, &nbt);
std::ptrdiff_t l1, l2;
internal::manage_caching_sizes(GetAction, &l1, &l2);
}
/** \returns the max number of threads reserved for Eigen
* \sa setNbThreads */
inline int nbThreads() {
int ret;
internal::manage_multi_threading(GetAction, &ret);
return ret;
}
/** Sets the max number of threads reserved for Eigen
* \sa nbThreads */
inline void setNbThreads(int v) {
internal::manage_multi_threading(SetAction, &v);
}
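// A minimal application-side usage sketch (not part of Eigen; sizes are illustrative):
//   Eigen::initParallel();            // call once before using Eigen from several threads
//   Eigen::setNbThreads(4);           // optionally cap the number of threads Eigen may use
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512), B = A;
//   Eigen::MatrixXd C = A * B;        // large GEMMs may then go through parallelize_gemm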
namespace internal {
template <typename Index> struct GemmParallelInfo {
GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {}
int volatile sync;
int volatile users;
Index rhs_start;
Index rhs_length;
};
template <bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor &func, Index rows, Index cols,
bool transpose) {
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
#if !(defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS)
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(transpose);
func(0, rows, 0, cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// 1- are we already in a parallel session?
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user
// does not use openmp?
if ((!Condition) || (omp_get_num_threads() > 1))
return func(0, rows, 0, cols);
Index size = transpose ? cols : rows;
// 2- compute the maximal number of threads from the size of the product:
// FIXME this has to be fine tuned
Index max_threads = std::max<Index>(1, size / 32);
// 3 - compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), max_threads);
if (threads == 1)
return func(0, rows, 0, cols);
Eigen::initParallel();
func.initParallelSession();
if (transpose)
std::swap(rows, cols);
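// Round the per-thread block sizes down to multiples of 4 columns and 8 rows,
// presumably to keep the panels friendly to the packing kernels.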
Index blockCols = (cols / threads) & ~Index(0x3);
Index blockRows = (rows / threads) & ~Index(0x7);
GemmParallelInfo<Index> *info = new GemmParallelInfo<Index>[threads];
#pragma omp parallel for schedule(static, 1) num_threads(threads)
for (Index i = 0; i < threads; ++i) {
Index r0 = i * blockRows;
Index actualBlockRows = (i + 1 == threads) ? rows - r0 : blockRows;
Index c0 = i * blockCols;
Index actualBlockCols = (i + 1 == threads) ? cols - c0 : blockCols;
info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols;
if (transpose)
func(0, cols, r0, actualBlockRows, info);
else
func(r0, actualBlockRows, 0, cols, info);
}
delete[] info;
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
minimal.c | /*
Minimal offloading example from
https://en.wikibooks.org/wiki/LLVM_Compiler/OpenMP_Support
compilation:
clang -fopenmp -O3 -fopenmp-targets=nvptx64-nvidia-cuda minimal.c -o minimal
*/
#include <stdio.h>
#include <omp.h>
int main(void) {
int isHost = 0;
#pragma omp target map(from: isHost)
{ isHost = omp_is_initial_device(); }
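/* map(from: isHost) copies the value assigned inside the target region back to the
host; when offloading succeeds, omp_is_initial_device() returns 0 on the device. */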
if (isHost < 0) {
printf("Runtime error, isHost=%d\n", isHost);
}
// CHECK: Target region executed on the device
printf("Target region executed on the %s\n", isHost ? "host" : "device");
return isHost;
}
|
elastic_kernel_3d_so12.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
};
int Kernel(struct dataobj *restrict block_sizes_vec, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_fxx_vec, struct dataobj *restrict save_src_fyy_vec, struct dataobj *restrict save_src_fzz_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict tau_sol_xx_vec, struct dataobj *restrict tau_sol_xy_vec, struct dataobj *restrict tau_sol_xz_vec, struct dataobj *restrict tau_sol_yy_vec, struct dataobj *restrict tau_sol_yz_vec, struct dataobj *restrict tau_sol_zz_vec, struct dataobj *restrict v_sol_x_vec, struct dataobj *restrict v_sol_y_vec, struct dataobj *restrict v_sol_z_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_fxx)[save_src_fxx_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fxx_vec->size[1]])save_src_fxx_vec->data;
float(*restrict save_src_fyy)[save_src_fyy_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fyy_vec->size[1]])save_src_fyy_vec->data;
float(*restrict save_src_fzz)[save_src_fzz_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fzz_vec->size[1]])save_src_fzz_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict tau_sol_xx)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]])tau_sol_xx_vec->data;
float(*restrict tau_sol_xy)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]])tau_sol_xy_vec->data;
float(*restrict tau_sol_xz)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]])tau_sol_xz_vec->data;
float(*restrict tau_sol_yy)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]])tau_sol_yy_vec->data;
float(*restrict tau_sol_yz)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]])tau_sol_yz_vec->data;
float(*restrict tau_sol_zz)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]])tau_sol_zz_vec->data;
float(*restrict v_sol_x)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]])v_sol_x_vec->data;
float(*restrict v_sol_y)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]])v_sol_y_vec->data;
float(*restrict v_sol_z)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]])v_sol_z_vec->data;
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
int xb_size = block_sizes[0];
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int sf = 12;
int t_blk_size = 2 * sf * (time_M - time_m);
//int xb_size = 64;
//int yb_size = 64;
//x0_blk0_size = 8;
//y0_blk0_size = 8;
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
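/* Time-tiled loop nest: the outer t_blk/xb/yb loops sweep skewed space-time tiles
(the x and y extents are extended by sf*(time_M - time_m) to cover the skew), and the
inner time loop cycles the t0/t1 buffers of the velocity and stress fields. */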
for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
{
for (int time = t_blk, t0 = (time) % (2), t1 = (time + 1) % (2); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (2), t1 = (((time / sf) % (time_M - time_m + 1))) % (2))
{
int tw = ((time / sf) % (time_M - time_m + 1));
#pragma omp parallel num_threads(nthreads)
{
//printf(" Change of time block : %d \n", tw);
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
//printf(" Change of inner xblock %d \n", x0_blk0);
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
//printf(" Updating velocity x %d \n", x - time + 4);
//printf(" \n PDE update : \n");
#pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 32)
for (int z = z_m; z <= z_M; z += 1)
{
//printf(" Updating velocity x %d z: %d \n", x - time + 4, z + 4);
float r26 = 1.0 / h_z;
float r25 = 1.0 / h_y;
float r24 = 1.0 / h_x;
v_sol_x[t1][x - time + 12][y - time + 12][z + 12] = r24 * (1.43043849497863e-5F * (tau_sol_xx[t0][x - time + 7][y - time + 12][z + 12] - tau_sol_xx[t0][x - time + 18][y - time + 12][z + 12]) + 2.35051066246684e-4F * (-tau_sol_xx[t0][x - time + 8][y - time + 12][z + 12] + tau_sol_xx[t0][x - time + 17][y - time + 12][z + 12]) + 1.94276901694599e-3F * (tau_sol_xx[t0][x - time + 9][y - time + 12][z + 12] - tau_sol_xx[t0][x - time + 16][y - time + 12][z + 12]) + 1.14234818208691e-2F * (-tau_sol_xx[t0][x - time + 10][y - time + 12][z + 12] + tau_sol_xx[t0][x - time + 15][y - time + 12][z + 12]) + 6.34637878910706e-2F * (tau_sol_xx[t0][x - time + 11][y - time + 12][z + 12] - tau_sol_xx[t0][x - time + 14][y - time + 12][z + 12]) + 7.9964372742749e-1F * (-tau_sol_xx[t0][x - time + 12][y - time + 12][z + 12] + tau_sol_xx[t0][x - time + 13][y - time + 12][z + 12])) + r25 * (1.43043849497863e-5F * (tau_sol_xy[t0][x - time + 12][y - time + 6][z + 12] - tau_sol_xy[t0][x - time + 12][y - time + 17][z + 12]) + 2.35051066246684e-4F * (-tau_sol_xy[t0][x - time + 12][y - time + 7][z + 12] + tau_sol_xy[t0][x - time + 12][y - time + 16][z + 12]) + 1.94276901694599e-3F * (tau_sol_xy[t0][x - time + 12][y - time + 8][z + 12] - tau_sol_xy[t0][x - time + 12][y - time + 15][z + 12]) + 1.14234818208691e-2F * (-tau_sol_xy[t0][x - time + 12][y - time + 9][z + 12] + tau_sol_xy[t0][x - time + 12][y - time + 14][z + 12]) + 6.34637878910706e-2F * (tau_sol_xy[t0][x - time + 12][y - time + 10][z + 12] - tau_sol_xy[t0][x - time + 12][y - time + 13][z + 12]) + 7.9964372742749e-1F * (-tau_sol_xy[t0][x - time + 12][y - time + 11][z + 12] + tau_sol_xy[t0][x - time + 12][y - time + 12][z + 12])) + r26 * (1.43043849497863e-5F * (tau_sol_xz[t0][x - time + 12][y - time + 12][z + 6] - tau_sol_xz[t0][x - time + 12][y - time + 12][z + 17]) + 2.35051066246684e-4F * (-tau_sol_xz[t0][x - time + 12][y - time + 12][z + 7] + tau_sol_xz[t0][x - time + 12][y - time + 12][z + 16]) + 1.94276901694599e-3F * (tau_sol_xz[t0][x - time + 12][y - time + 12][z + 8] - tau_sol_xz[t0][x - time + 12][y - time + 12][z + 15]) + 1.14234818208691e-2F * (-tau_sol_xz[t0][x - time + 12][y - time + 12][z + 9] + tau_sol_xz[t0][x - time + 12][y - time + 12][z + 14]) + 6.34637878910706e-2F * (tau_sol_xz[t0][x - time + 12][y - time + 12][z + 10] - tau_sol_xz[t0][x - time + 12][y - time + 12][z + 13]) + 7.9964372742749e-1F * (-tau_sol_xz[t0][x - time + 12][y - time + 12][z + 11] + tau_sol_xz[t0][x - time + 12][y - time + 12][z + 12])) + v_sol_x[t0][x - time + 12][y - time + 12][z + 12];
v_sol_y[t1][x - time + 12][y - time + 12][z + 12] = r24 * (1.43043849497863e-5F * (tau_sol_xy[t0][x - time + 6][y - time + 12][z + 12] - tau_sol_xy[t0][x - time + 17][y - time + 12][z + 12]) + 2.35051066246684e-4F * (-tau_sol_xy[t0][x - time + 7][y - time + 12][z + 12] + tau_sol_xy[t0][x - time + 16][y - time + 12][z + 12]) + 1.94276901694599e-3F * (tau_sol_xy[t0][x - time + 8][y - time + 12][z + 12] - tau_sol_xy[t0][x - time + 15][y - time + 12][z + 12]) + 1.14234818208691e-2F * (-tau_sol_xy[t0][x - time + 9][y - time + 12][z + 12] + tau_sol_xy[t0][x - time + 14][y - time + 12][z + 12]) + 6.34637878910706e-2F * (tau_sol_xy[t0][x - time + 10][y - time + 12][z + 12] - tau_sol_xy[t0][x - time + 13][y - time + 12][z + 12]) + 7.9964372742749e-1F * (-tau_sol_xy[t0][x - time + 11][y - time + 12][z + 12] + tau_sol_xy[t0][x - time + 12][y - time + 12][z + 12])) + r25 * (1.43043849497863e-5F * (tau_sol_yy[t0][x - time + 12][y - time + 7][z + 12] - tau_sol_yy[t0][x - time + 12][y - time + 18][z + 12]) + 2.35051066246684e-4F * (-tau_sol_yy[t0][x - time + 12][y - time + 8][z + 12] + tau_sol_yy[t0][x - time + 12][y - time + 17][z + 12]) + 1.94276901694599e-3F * (tau_sol_yy[t0][x - time + 12][y - time + 9][z + 12] - tau_sol_yy[t0][x - time + 12][y - time + 16][z + 12]) + 1.14234818208691e-2F * (-tau_sol_yy[t0][x - time + 12][y - time + 10][z + 12] + tau_sol_yy[t0][x - time + 12][y - time + 15][z + 12]) + 6.34637878910706e-2F * (tau_sol_yy[t0][x - time + 12][y - time + 11][z + 12] - tau_sol_yy[t0][x - time + 12][y - time + 14][z + 12]) + 7.9964372742749e-1F * (-tau_sol_yy[t0][x - time + 12][y - time + 12][z + 12] + tau_sol_yy[t0][x - time + 12][y - time + 13][z + 12])) + r26 * (1.43043849497863e-5F * (tau_sol_yz[t0][x - time + 12][y - time + 12][z + 6] - tau_sol_yz[t0][x - time + 12][y - time + 12][z + 17]) + 2.35051066246684e-4F * (-tau_sol_yz[t0][x - time + 12][y - time + 12][z + 7] + tau_sol_yz[t0][x - time + 12][y - time + 12][z + 16]) + 1.94276901694599e-3F * (tau_sol_yz[t0][x - time + 12][y - time + 12][z + 8] - tau_sol_yz[t0][x - time + 12][y - time + 12][z + 15]) + 1.14234818208691e-2F * (-tau_sol_yz[t0][x - time + 12][y - time + 12][z + 9] + tau_sol_yz[t0][x - time + 12][y - time + 12][z + 14]) + 6.34637878910706e-2F * (tau_sol_yz[t0][x - time + 12][y - time + 12][z + 10] - tau_sol_yz[t0][x - time + 12][y - time + 12][z + 13]) + 7.9964372742749e-1F * (-tau_sol_yz[t0][x - time + 12][y - time + 12][z + 11] + tau_sol_yz[t0][x - time + 12][y - time + 12][z + 12])) + v_sol_y[t0][x - time + 12][y - time + 12][z + 12];
v_sol_z[t1][x - time + 12][y - time + 12][z + 12] = r24 * (1.43043849497863e-5F * (tau_sol_xz[t0][x - time + 6][y - time + 12][z + 12] - tau_sol_xz[t0][x - time + 17][y - time + 12][z + 12]) + 2.35051066246684e-4F * (-tau_sol_xz[t0][x - time + 7][y - time + 12][z + 12] + tau_sol_xz[t0][x - time + 16][y - time + 12][z + 12]) + 1.94276901694599e-3F * (tau_sol_xz[t0][x - time + 8][y - time + 12][z + 12] - tau_sol_xz[t0][x - time + 15][y - time + 12][z + 12]) + 1.14234818208691e-2F * (-tau_sol_xz[t0][x - time + 9][y - time + 12][z + 12] + tau_sol_xz[t0][x - time + 14][y - time + 12][z + 12]) + 6.34637878910706e-2F * (tau_sol_xz[t0][x - time + 10][y - time + 12][z + 12] - tau_sol_xz[t0][x - time + 13][y - time + 12][z + 12]) + 7.9964372742749e-1F * (-tau_sol_xz[t0][x - time + 11][y - time + 12][z + 12] + tau_sol_xz[t0][x - time + 12][y - time + 12][z + 12])) + r25 * (1.43043849497863e-5F * (tau_sol_yz[t0][x - time + 12][y - time + 6][z + 12] - tau_sol_yz[t0][x - time + 12][y - time + 17][z + 12]) + 2.35051066246684e-4F * (-tau_sol_yz[t0][x - time + 12][y - time + 7][z + 12] + tau_sol_yz[t0][x - time + 12][y - time + 16][z + 12]) + 1.94276901694599e-3F * (tau_sol_yz[t0][x - time + 12][y - time + 8][z + 12] - tau_sol_yz[t0][x - time + 12][y - time + 15][z + 12]) + 1.14234818208691e-2F * (-tau_sol_yz[t0][x - time + 12][y - time + 9][z + 12] + tau_sol_yz[t0][x - time + 12][y - time + 14][z + 12]) + 6.34637878910706e-2F * (tau_sol_yz[t0][x - time + 12][y - time + 10][z + 12] - tau_sol_yz[t0][x - time + 12][y - time + 13][z + 12]) + 7.9964372742749e-1F * (-tau_sol_yz[t0][x - time + 12][y - time + 11][z + 12] + tau_sol_yz[t0][x - time + 12][y - time + 12][z + 12])) + r26 * (1.43043849497863e-5F * (tau_sol_zz[t0][x - time + 12][y - time + 12][z + 7] - tau_sol_zz[t0][x - time + 12][y - time + 12][z + 18]) + 2.35051066246684e-4F * (-tau_sol_zz[t0][x - time + 12][y - time + 12][z + 8] + tau_sol_zz[t0][x - time + 12][y - time + 12][z + 17]) + 1.94276901694599e-3F * (tau_sol_zz[t0][x - time + 12][y - time + 12][z + 9] - tau_sol_zz[t0][x - time + 12][y - time + 12][z + 16]) + 1.14234818208691e-2F * (-tau_sol_zz[t0][x - time + 12][y - time + 12][z + 10] + tau_sol_zz[t0][x - time + 12][y - time + 12][z + 15]) + 6.34637878910706e-2F * (tau_sol_zz[t0][x - time + 12][y - time + 12][z + 11] - tau_sol_zz[t0][x - time + 12][y - time + 12][z + 14]) + 7.9964372742749e-1F * (-tau_sol_zz[t0][x - time + 12][y - time + 12][z + 12] + tau_sol_zz[t0][x - time + 12][y - time + 12][z + 13])) + v_sol_z[t0][x - time + 12][y - time + 12][z + 12];
}
}
}
}
}
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb - 6); x0_blk0 <= +min((x_M + time), (xb - 6 + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb - 6); y0_blk0 <= +min((y_M + time), (yb - 6 + yb_size)); y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= min(min((x_M + time), (xb - 6 + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
for (int y = y0_blk0; y <= min(min((y_M + time), (yb - 6 + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
//printf(" Updating stress x %d \n", x - time + 4);
#pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 32)
for (int z = z_m; z <= z_M; z += 1)
{
//printf(" Updating x %d z: %d \n", x - time + 4, z + 4);
float r53 = -v_sol_z[t1][x - time + 12][y - time + 12][z + 12];
float r52 = -v_sol_y[t1][x - time + 12][y - time + 12][z + 12];
float r51 = -v_sol_x[t1][x - time + 12][y - time + 12][z + 12];
float r50 = v_sol_y[t1][x - time + 12][y - time + 8][z + 12] - v_sol_y[t1][x - time + 12][y - time + 15][z + 12];
float r49 = -v_sol_y[t1][x - time + 12][y - time + 11][z + 12] + v_sol_y[t1][x - time + 12][y - time + 12][z + 12];
float r48 = -v_sol_y[t1][x - time + 12][y - time + 9][z + 12] + v_sol_y[t1][x - time + 12][y - time + 14][z + 12];
float r47 = -v_sol_y[t1][x - time + 12][y - time + 7][z + 12] + v_sol_y[t1][x - time + 12][y - time + 16][z + 12];
float r46 = v_sol_y[t1][x - time + 12][y - time + 6][z + 12] - v_sol_y[t1][x - time + 12][y - time + 17][z + 12];
float r45 = v_sol_y[t1][x - time + 12][y - time + 10][z + 12] - v_sol_y[t1][x - time + 12][y - time + 13][z + 12];
float r44 = 1.0 / h_y;
float r43 = v_sol_z[t1][x - time + 12][y - time + 12][z + 8] - v_sol_z[t1][x - time + 12][y - time + 12][z + 15];
float r42 = -v_sol_z[t1][x - time + 12][y - time + 12][z + 11] + v_sol_z[t1][x - time + 12][y - time + 12][z + 12];
float r41 = -v_sol_z[t1][x - time + 12][y - time + 12][z + 9] + v_sol_z[t1][x - time + 12][y - time + 12][z + 14];
float r40 = -v_sol_z[t1][x - time + 12][y - time + 12][z + 7] + v_sol_z[t1][x - time + 12][y - time + 12][z + 16];
float r39 = v_sol_z[t1][x - time + 12][y - time + 12][z + 6] - v_sol_z[t1][x - time + 12][y - time + 12][z + 17];
float r38 = v_sol_z[t1][x - time + 12][y - time + 12][z + 10] - v_sol_z[t1][x - time + 12][y - time + 12][z + 13];
float r37 = 1.0 / h_z;
float r36 = v_sol_x[t1][x - time + 8][y - time + 12][z + 12] - v_sol_x[t1][x - time + 15][y - time + 12][z + 12];
float r35 = -v_sol_x[t1][x - time + 11][y - time + 12][z + 12] + v_sol_x[t1][x - time + 12][y - time + 12][z + 12];
float r34 = -v_sol_x[t1][x - time + 9][y - time + 12][z + 12] + v_sol_x[t1][x - time + 14][y - time + 12][z + 12];
float r33 = -v_sol_x[t1][x - time + 7][y - time + 12][z + 12] + v_sol_x[t1][x - time + 16][y - time + 12][z + 12];
float r32 = v_sol_x[t1][x - time + 6][y - time + 12][z + 12] - v_sol_x[t1][x - time + 17][y - time + 12][z + 12];
float r31 = v_sol_x[t1][x - time + 10][y - time + 12][z + 12] - v_sol_x[t1][x - time + 13][y - time + 12][z + 12];
float r30 = 1.0 / h_x;
float r29 = r30 * (4.11245345534138e-1F * r31 + 9.26924144746152e-5F * r32 + 1.52313090927851e-3F * r33 + 7.40241621992317e-2F * r34 + 5.18169135373014F * r35 + 1.258914322981e-2F * r36);
float r28 = r37 * (4.11245345534138e-1F * r38 + 9.26924144746152e-5F * r39 + 1.52313090927851e-3F * r40 + 7.40241621992317e-2F * r41 + 5.18169135373014F * r42 + 1.258914322981e-2F * r43);
float r27 = r44 * (4.11245345534138e-1F * r45 + 9.26924144746152e-5F * r46 + 1.52313090927851e-3F * r47 + 7.40241621992317e-2F * r48 + 5.18169135373014F * r49 + 1.258914322981e-2F * r50);
tau_sol_xx[t1][x - time + 12][y - time + 12][z + 12] = r27 + r28 + r30 * (8.22490691068276e-1F * r31 + 1.8538482894923e-4F * r32 + 3.04626181855702e-3F * r33 + 1.48048324398463e-1F * r34 + 1.03633827074603e+1F * r35 + 2.517828645962e-2F * r36) + tau_sol_xx[t0][x - time + 12][y - time + 12][z + 12];
tau_sol_xy[t1][x - time + 12][y - time + 12][z + 12] = r30 * (2.59084567686507F * (r52 + v_sol_y[t1][x - time + 13][y - time + 12][z + 12]) + 4.63462072373076e-5F * (v_sol_y[t1][x - time + 7][y - time + 12][z + 12] - v_sol_y[t1][x - time + 18][y - time + 12][z + 12]) + 7.61565454639255e-4F * (-v_sol_y[t1][x - time + 8][y - time + 12][z + 12] + v_sol_y[t1][x - time + 17][y - time + 12][z + 12]) + 6.29457161490501e-3F * (v_sol_y[t1][x - time + 9][y - time + 12][z + 12] - v_sol_y[t1][x - time + 16][y - time + 12][z + 12]) + 3.70120810996159e-2F * (-v_sol_y[t1][x - time + 10][y - time + 12][z + 12] + v_sol_y[t1][x - time + 15][y - time + 12][z + 12]) + 2.05622672767069e-1F * (v_sol_y[t1][x - time + 11][y - time + 12][z + 12] - v_sol_y[t1][x - time + 14][y - time + 12][z + 12])) + r44 * (2.59084567686507F * (r51 + v_sol_x[t1][x - time + 12][y - time + 13][z + 12]) + 4.63462072373076e-5F * (v_sol_x[t1][x - time + 12][y - time + 7][z + 12] - v_sol_x[t1][x - time + 12][y - time + 18][z + 12]) + 7.61565454639255e-4F * (-v_sol_x[t1][x - time + 12][y - time + 8][z + 12] + v_sol_x[t1][x - time + 12][y - time + 17][z + 12]) + 6.29457161490501e-3F * (v_sol_x[t1][x - time + 12][y - time + 9][z + 12] - v_sol_x[t1][x - time + 12][y - time + 16][z + 12]) + 3.70120810996159e-2F * (-v_sol_x[t1][x - time + 12][y - time + 10][z + 12] + v_sol_x[t1][x - time + 12][y - time + 15][z + 12]) + 2.05622672767069e-1F * (v_sol_x[t1][x - time + 12][y - time + 11][z + 12] - v_sol_x[t1][x - time + 12][y - time + 14][z + 12])) + tau_sol_xy[t0][x - time + 12][y - time + 12][z + 12];
tau_sol_xz[t1][x - time + 12][y - time + 12][z + 12] = r30 * (2.59084567686507F * (r53 + v_sol_z[t1][x - time + 13][y - time + 12][z + 12]) + 4.63462072373076e-5F * (v_sol_z[t1][x - time + 7][y - time + 12][z + 12] - v_sol_z[t1][x - time + 18][y - time + 12][z + 12]) + 7.61565454639255e-4F * (-v_sol_z[t1][x - time + 8][y - time + 12][z + 12] + v_sol_z[t1][x - time + 17][y - time + 12][z + 12]) + 6.29457161490501e-3F * (v_sol_z[t1][x - time + 9][y - time + 12][z + 12] - v_sol_z[t1][x - time + 16][y - time + 12][z + 12]) + 3.70120810996159e-2F * (-v_sol_z[t1][x - time + 10][y - time + 12][z + 12] + v_sol_z[t1][x - time + 15][y - time + 12][z + 12]) + 2.05622672767069e-1F * (v_sol_z[t1][x - time + 11][y - time + 12][z + 12] - v_sol_z[t1][x - time + 14][y - time + 12][z + 12])) + r37 * (2.59084567686507F * (r51 + v_sol_x[t1][x - time + 12][y - time + 12][z + 13]) + 4.63462072373076e-5F * (v_sol_x[t1][x - time + 12][y - time + 12][z + 7] - v_sol_x[t1][x - time + 12][y - time + 12][z + 18]) + 7.61565454639255e-4F * (-v_sol_x[t1][x - time + 12][y - time + 12][z + 8] + v_sol_x[t1][x - time + 12][y - time + 12][z + 17]) + 6.29457161490501e-3F * (v_sol_x[t1][x - time + 12][y - time + 12][z + 9] - v_sol_x[t1][x - time + 12][y - time + 12][z + 16]) + 3.70120810996159e-2F * (-v_sol_x[t1][x - time + 12][y - time + 12][z + 10] + v_sol_x[t1][x - time + 12][y - time + 12][z + 15]) + 2.05622672767069e-1F * (v_sol_x[t1][x - time + 12][y - time + 12][z + 11] - v_sol_x[t1][x - time + 12][y - time + 12][z + 14])) + tau_sol_xz[t0][x - time + 12][y - time + 12][z + 12];
tau_sol_yy[t1][x - time + 12][y - time + 12][z + 12] = r28 + r29 + r44 * (8.22490691068276e-1F * r45 + 1.8538482894923e-4F * r46 + 3.04626181855702e-3F * r47 + 1.48048324398463e-1F * r48 + 1.03633827074603e+1F * r49 + 2.517828645962e-2F * r50) + tau_sol_yy[t0][x - time + 12][y - time + 12][z + 12];
tau_sol_yz[t1][x - time + 12][y - time + 12][z + 12] = r37 * (2.59084567686507F * (r52 + v_sol_y[t1][x - time + 12][y - time + 12][z + 13]) + 4.63462072373076e-5F * (v_sol_y[t1][x - time + 12][y - time + 12][z + 7] - v_sol_y[t1][x - time + 12][y - time + 12][z + 18]) + 7.61565454639255e-4F * (-v_sol_y[t1][x - time + 12][y - time + 12][z + 8] + v_sol_y[t1][x - time + 12][y - time + 12][z + 17]) + 6.29457161490501e-3F * (v_sol_y[t1][x - time + 12][y - time + 12][z + 9] - v_sol_y[t1][x - time + 12][y - time + 12][z + 16]) + 3.70120810996159e-2F * (-v_sol_y[t1][x - time + 12][y - time + 12][z + 10] + v_sol_y[t1][x - time + 12][y - time + 12][z + 15]) + 2.05622672767069e-1F * (v_sol_y[t1][x - time + 12][y - time + 12][z + 11] - v_sol_y[t1][x - time + 12][y - time + 12][z + 14])) + r44 * (2.59084567686507F * (r53 + v_sol_z[t1][x - time + 12][y - time + 13][z + 12]) + 4.63462072373076e-5F * (v_sol_z[t1][x - time + 12][y - time + 7][z + 12] - v_sol_z[t1][x - time + 12][y - time + 18][z + 12]) + 7.61565454639255e-4F * (-v_sol_z[t1][x - time + 12][y - time + 8][z + 12] + v_sol_z[t1][x - time + 12][y - time + 17][z + 12]) + 6.29457161490501e-3F * (v_sol_z[t1][x - time + 12][y - time + 9][z + 12] - v_sol_z[t1][x - time + 12][y - time + 16][z + 12]) + 3.70120810996159e-2F * (-v_sol_z[t1][x - time + 12][y - time + 10][z + 12] + v_sol_z[t1][x - time + 12][y - time + 15][z + 12]) + 2.05622672767069e-1F * (v_sol_z[t1][x - time + 12][y - time + 11][z + 12] - v_sol_z[t1][x - time + 12][y - time + 14][z + 12])) + tau_sol_yz[t0][x - time + 12][y - time + 12][z + 12];
tau_sol_zz[t1][x - time + 12][y - time + 12][z + 12] = r27 + r29 + r37 * (8.22490691068276e-1F * r38 + 1.8538482894923e-4F * r39 + 3.04626181855702e-3F * r40 + 1.48048324398463e-1F * r41 + 1.03633827074603e+1F * r42 + 2.517828645962e-2F * r43) + tau_sol_zz[t0][x - time + 12][y - time + 12][z + 12];
}
for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
{
//printf("\n Source_injection at : ");
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r0 = save_src_fxx[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
float r1 = save_src_fyy[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
float r2 = save_src_fzz[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
tau_sol_xx[t1][x - time + 12][y - time + 12][zind + 12] += r0;
tau_sol_yy[t1][x - time + 12][y - time + 12][zind + 12] += r1;
tau_sol_zz[t1][x - time + 12][y - time + 12][zind + 12] += r2;
//printf(" Time %d , at : %d, %d \n", tw, x - time + 4, zind + 4);
}
}
}
}
}
}
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
return 0;
}
/* Backdoor edit at Tue Jul 28 12:03:11 2020*/
/* Backdoor edit at Tue Jul 28 12:27:09 2020*/
/* Backdoor edit at Tue Jul 28 12:28:10 2020*/
/* Backdoor edit at Tue Jul 28 15:17:20 2020*/
|
GB_binop__minus_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint32)
// A*D function (colscale): GB (_AxD__minus_uint32)
// D*A function (rowscale): GB (_DxB__minus_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint32)
// C=scalar+B GB (_bind1st__minus_uint32)
// C=scalar+B' GB (_bind1st_tran__minus_uint32)
// C=A+scalar GB (_bind2nd__minus_uint32)
// C=A'+scalar GB (_bind2nd_tran__minus_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij - bij)
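// For reference, a user-level call that can dispatch to the eWiseAdd kernel below
// (a sketch only; error handling omitted):
//   GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_MINUS_UINT32, A, B, NULL) ;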
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_UINT32 || GxB_NO_MINUS_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__minus_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__minus_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__minus_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__minus_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__minus_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__minus_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__minus_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__minus_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__minus_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__minus_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__minus_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__minus_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__minus_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
GrB_Info GB (_bind1st_tran__minus_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
GrB_Info GB (_bind2nd_tran__minus_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
facedetectcnn.h | /*
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement For libfacedetection
(3-clause BSD License)
Copyright (c) 2018-2020, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#pragma once
#include "facedetection_export.h"
//#define _ENABLE_AVX512 //Please enable it if X64 CPU
//#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU
FACEDETECTION_EXPORT int * facedetect_cnn(
int * result_buffer, //buffer memory for storing face detection results, must have at least (256 * 15 + 1) int positions
unsigned char * rgb_image_data,
int width,
int height,
int step); //input image, it must be BGR (three channels) instead of RGB!
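/*
A minimal usage sketch (hypothetical caller code; the buffer size follows the comment
above, and the first int of the returned buffer is assumed to hold the face count):
    int * results = (int *)malloc((256 * 15 + 1) * sizeof(int));
    int * p = facedetect_cnn(results, bgr_image_data, width, height, step);
    int num_faces = p ? p[0] : 0;
    free(results);
*/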
/*
DO NOT EDIT the following code if you don't really understand it.
*/
#if defined(_ENABLE_AVX512) || defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
//NEON does not support UINT8*INT8 dot product,
//so convert the input data to the range [0, 127]
//and then use an INT8*INT8 dot product
#define _MAX_UINT8_VALUE 127
#else
#define _MAX_UINT8_VALUE 255
#endif
#if defined(_ENABLE_AVX512)
#define _MALLOC_ALIGN 512
#elif defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif
#if defined(_ENABLE_AVX512) && defined(_ENABLE_NEON)
#error Cannot enable both AVX512 and NEON at the same time.
#endif
#if defined(_ENABLE_AVX2) && defined(_ENABLE_NEON)
#error Cannot enable both AVX2 and NEON at the same time.
#endif
#if defined(_ENABLE_AVX512) && defined(_ENABLE_AVX2)
#error Cannot enable both AVX512 and AVX2 at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
#include <typeinfo>
using namespace std;
void* myAlloc(size_t size);
void myFree_(void* ptr);
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);
#ifndef MIN
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
typedef struct FaceRect_
{
float score;
int x;
int y;
int w;
int h;
int lm[10];
}FaceRect;
typedef struct ConvInfoStruct_ {
int pad;
int stride;
int kernel_size;
int channels;
int num;
float scale;
signed char* pWeights;
signed int* pBias;
}ConvInfoStruct;
template <class T>
class CDataBlob
{
public:
T * data;
int width;
int height;
int channels;
int channelStep;
float scale;
//when the datablob is a filter, the bias is 0 by default;
//if it is the filtered data, the bias is 1 by default
int bias;
public:
CDataBlob() {
data = 0;
width = 0;
height = 0;
channels = 0;
channelStep = 0;
scale = 1.0f;
bias = 0;
}
CDataBlob(int w, int h, int c)
{
data = 0;
create(w, h, c);
}
~CDataBlob()
{
setNULL();
}
void setNULL()
{
if (data)
myFree(&data);
width = height = channels = channelStep = 0;
scale = 1.0f;
}
bool create(int w, int h, int c)
{
setNULL();
width = w;
height = h;
channels = c;
bias = 0;
//alloc space for int8 array
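//channelStep is the per-pixel stride in bytes: the channel payload rounded up to a
//multiple of _MALLOC_ALIGN/8 bytes so every pixel starts on an aligned boundary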
int remBytes = (sizeof(T)* channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
this->channelStep = channels * sizeof(T);
else
this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes;
data = (T*)myAlloc(size_t(width) * height * this->channelStep);
if (data == NULL)
{
cerr << "Failed to alloc memeory for uint8 data blob: "
<< width << "*"
<< height << "*"
<< channels << endl;
return false;
}
//memset(data, 0, width * height * channelStep);
//the following loop is faster than memset,
//but it zeroes only the padding bytes, not the whole buffer.
//BE CAREFUL!!!
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
int pixel_end = this->channelStep / sizeof(T);
T * pI = (this->data + (size_t(r) * this->width + c) * this->channelStep /sizeof(T));
for (int ch = this->channels; ch < pixel_end; ch++)
pI[ch] = 0;
}
}
return true;
}
bool setInt8FilterData(signed char * pData, int bias, int dataWidth, int dataHeight, int dataChannels)
{
if (pData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (typeid(signed char) != typeid(T))
{
cerr << "Data must be signed char, the same with the source data." << endl;
return false;
}
if (dataWidth != this->width ||
dataHeight != this->height ||
dataChannels != this->channels)
{
cerr << "The dimension of the data can not match that of the Blob." << endl;
return false;
}
for(int row = 0; row < height; row++)
for (int col = 0; col < width; col++)
{
T * p = (this->data + (size_t(width) * row + col) * channelStep /sizeof(T));
for (int ch = 0; ch < channels; ch++)
{
p[ch] = pData[ch * height * width + row * width + col];
}
}
this->bias = bias;
return true;
}
bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep)
{
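//Repack the BGR image so that a 3x3, stride-2, pad-1 convolution can be computed as a
//1x1, stride-1, pad-0 convolution: each location of the half-resolution output blob
//gathers its 3x3 neighborhood of 3-channel pixels into 27 channels.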
if (imgData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (typeid(unsigned char) != typeid(T))
{
cerr << "Data must be unsigned char, the same with the source data." << endl;
return false;
}
if (imgChannels != 3)
{
cerr << "The input image must be a 3-channel RGB image." << endl;
return false;
}
create((imgWidth+1)/2, (imgHeight+1)/2, 27);
//since the pixel assignment cannot fill all the elements in the blob,
//the remaining elements must be initialized to 0
memset(data, 0, size_t(width) * height * channelStep);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
T * pData = (unsigned char*)this->data + (size_t(r) * this->width + c) * this->channelStep;
for (int fy = -1; fy <= 1; fy++)
{
int srcy = r * 2 + fy;
if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
continue;
for (int fx = -1; fx <= 1; fx++)
{
int srcx = c * 2 + fx;
if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
continue;
const unsigned char * pImgData = imgData + size_t(imgWidthStep) * srcy + imgChannels * srcx;
int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
#if defined(_ENABLE_NEON)
pData[output_channel_offset] = (pImgData[0] / 2);
pData[output_channel_offset + 1] = (pImgData[1] / 2);
pData[output_channel_offset + 2] = (pImgData[2] / 2);
#else
pData[output_channel_offset] = (pImgData[0]);
pData[output_channel_offset+1] = (pImgData[1]);
pData[output_channel_offset+2] = (pImgData[2]);
#endif
}
}
}
}
#if defined(_ENABLE_NEON)
this->bias = 1; // 1/2 = 0
this->scale = 0.5f;
#else
this->bias = 1;
this->scale = 1.0f;
#endif
return true;
}
T getElement(int x, int y, int channel)
{
if (this->data)
{
if (x >= 0 && x < this->width &&
y >= 0 && y < this->height &&
channel >= 0 && channel < this->channels)
{
T * p = this->data + (size_t(y) * this->width + x) * this->channelStep/sizeof(T);
return (p[channel]);
}
}
return (T)(0);
}
friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
{
output << "DataBlob Size (Width, Height, Channel, scale) = ("
<< dataBlob.width
<< ", " << dataBlob.height
<< ", " << dataBlob.channels
<< ", " << dataBlob.scale
<< ", " << dataBlob.bias
<< ")" << endl;
for (int ch = 0; ch < dataBlob.channels; ch++)
{
output << "Channel " << ch << ": " << endl;
for (int row = 0; row < dataBlob.height; row++)
{
output << "(";
for (int col = 0; col < dataBlob.width; col++)
{
T * p = (dataBlob.data + (dataBlob.width * row + col) * dataBlob.channelStep /sizeof(T) );
if(sizeof(T)<4)
output << (int)(p[ch]);
else
output << p[ch];
if (col != dataBlob.width - 1)
output << ", ";
}
output << ")" << endl;
}
}
return output;
}
};
class Filters {
public:
vector<CDataBlob<signed char> *> filters;
int pad;
int stride;
float scale; //element * scale = original value
Filters()
{
pad = 0;
stride = 0;
scale = 0;
}
~Filters()
{
for (int i = 0; i < filters.size(); i++)
{
delete filters[i];
filters[i] = 0;
}
}
};
bool convertInt2Float(CDataBlob<int> * inputData, CDataBlob<float> * outputData);
bool convolution(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<int> *outputData);
bool convolution_relu(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<unsigned char> *outputData);
bool maxpooling2x2S2(const CDataBlob<unsigned char> *inputData, CDataBlob<unsigned char> *outputData);
bool priorbox(const CDataBlob<unsigned char> * featureData, int img_width, int img_height, int step, int num_sizes, float * pWinSizes, CDataBlob<float> * outputData);
template<typename T>
bool concat4(const CDataBlob<T> *inputData1, const CDataBlob<T> *inputData2, const CDataBlob<T> *inputData3, const CDataBlob<T> *inputData4, CDataBlob<T> *outputData);
/* the input data for softmax must be a vector, i.e. data stored in a multi-channel blob of size 1x1 */
template<typename T>
bool blob2vector(const CDataBlob<T> * inputData, CDataBlob<T> * outputData);
bool softmax1vector2class(CDataBlob<float> *inputOutputData);
bool detection_output(const CDataBlob<float> * priorbox, const CDataBlob<float> * loc, const CDataBlob<float> * conf, float overlap_threshold, float confidence_threshold, int top_k, int keep_top_k, CDataBlob<float> * outputData);
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int width, int height, int step);
|
vacation.c | /* =============================================================================
*
* vacation.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include "client.h"
#include "customer.h"
#include "list.h"
#include "manager.h"
#include "map.h"
#include "memory.h"
#include "operation.h"
#include "random.h"
#include "reservation.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "types.h"
#include "utility.h"
enum param_types {
PARAM_CLIENTS = (unsigned char)'c',
PARAM_NUMBER = (unsigned char)'n',
PARAM_QUERIES = (unsigned char)'q',
PARAM_RELATIONS = (unsigned char)'r',
PARAM_TRANSACTIONS = (unsigned char)'t',
PARAM_USER = (unsigned char)'u',
};
#define PARAM_DEFAULT_CLIENTS (1)
#define PARAM_DEFAULT_NUMBER (10)
#define PARAM_DEFAULT_QUERIES (90)
#define PARAM_DEFAULT_RELATIONS (1 << 16)
#define PARAM_DEFAULT_TRANSACTIONS (1 << 26)
#define PARAM_DEFAULT_USER (80)
double global_params[256]; /* 256 = ascii limit */
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
printf("Usage: %s [options]\n", appName);
puts("\nOptions: (defaults)\n");
printf(" c <UINT> Number of [c]lients (%i)\n",
PARAM_DEFAULT_CLIENTS);
printf(" n <UINT> [n]umber of user queries/transaction (%i)\n",
PARAM_DEFAULT_NUMBER);
printf(" q <UINT> Percentage of relations [q]ueried (%i)\n",
PARAM_DEFAULT_QUERIES);
printf(" r <UINT> Number of possible [r]elations (%i)\n",
PARAM_DEFAULT_RELATIONS);
printf(" t <UINT> Number of [t]ransactions (%i)\n",
PARAM_DEFAULT_TRANSACTIONS);
printf(" u <UINT> Percentage of [u]ser transactions (%i)\n",
PARAM_DEFAULT_USER);
exit(1);
}
/* =============================================================================
* setDefaultParams
* =============================================================================
*/
static void
setDefaultParams ()
{
global_params[PARAM_CLIENTS] = PARAM_DEFAULT_CLIENTS;
global_params[PARAM_NUMBER] = PARAM_DEFAULT_NUMBER;
global_params[PARAM_QUERIES] = PARAM_DEFAULT_QUERIES;
global_params[PARAM_RELATIONS] = PARAM_DEFAULT_RELATIONS;
global_params[PARAM_TRANSACTIONS] = PARAM_DEFAULT_TRANSACTIONS;
global_params[PARAM_USER] = PARAM_DEFAULT_USER;
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
static void
parseArgs (long argc, char* const argv[])
{
long i;
long opt;
opterr = 0;
setDefaultParams();
while ((opt = getopt(argc, argv, "c:n:q:r:t:u:")) != -1) {
switch (opt) {
case 'c':
case 'n':
case 'q':
case 'r':
case 't':
case 'u':
global_params[(unsigned char)opt] = atol(optarg);
break;
case '?':
default:
opterr++;
break;
}
}
for (i = optind; i < argc; i++) {
fprintf(stderr, "Non-option argument: %s\n", argv[i]);
opterr++;
}
if (opterr) {
displayUsage(argv[0]);
}
}
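/*
* Illustrative usage (not part of the STAMP sources): invoking
*     ./vacation -c8 -q90 -u80 -r1048576 -t4194304
* makes the getopt loop above store 8 into global_params['c'],
* 90 into global_params['q'], and so on; an unknown option or a
* stray non-option argument increments opterr, which routes
* execution into displayUsage().
*/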
/* =============================================================================
* addCustomer
* -- Wrapper so the function-pointer table in initializeManager() has a
* uniform signature; num and price are ignored for customers
* =============================================================================
*/
static bool_t
addCustomer (manager_t* managerPtr, long id, long num, long price)
{
return manager_addCustomer_seq(managerPtr, id);
}
/* =============================================================================
* initializeManager
* =============================================================================
*/
static manager_t*
initializeManager ()
{
manager_t* managerPtr;
long i;
long numRelation;
random_t* randomPtr;
long* ids;
bool_t (*manager_add[])( manager_t*, long, long, long) = {
&manager_addCar_seq,
&manager_addFlight_seq,
&manager_addRoom_seq,
&addCustomer
};
long t;
long numTable = sizeof(manager_add) / sizeof(manager_add[0]);
printf("Initializing manager... ");
fflush(stdout);
randomPtr = random_alloc();
assert(randomPtr != NULL);
managerPtr = manager_alloc();
assert(managerPtr != NULL);
numRelation = (long)global_params[PARAM_RELATIONS];
ids = (long*)SEQ_MALLOC(numRelation * sizeof(long));
for (i = 0; i < numRelation; i++) {
ids[i] = i + 1;
}
for (t = 0; t < numTable; t++) {
/* Shuffle ids */
for (i = 0; i < numRelation; i++) {
long x = random_generate(randomPtr) % numRelation;
long y = random_generate(randomPtr) % numRelation;
long tmp = ids[x];
ids[x] = ids[y];
ids[y] = tmp;
}
/* Populate table */
for (i = 0; i < numRelation; i++) {
bool_t status;
long id = ids[i];
long num = ((random_generate(randomPtr) % 5) + 1) * 100;
long price = ((random_generate(randomPtr) % 5) * 10) + 50;
status = manager_add[t](managerPtr, id, num, price);
assert(status);
}
} /* for t */
puts("done.");
fflush(stdout);
random_free(randomPtr);
SEQ_FREE(ids);
return managerPtr;
}
/* =============================================================================
* initializeClients
* =============================================================================
*/
static client_t**
initializeClients (manager_t* managerPtr)
{
random_t* randomPtr;
client_t** clients;
long i;
long numClient = (long)global_params[PARAM_CLIENTS];
long numTransaction = (long)global_params[PARAM_TRANSACTIONS];
long numTransactionPerClient;
long numQueryPerTransaction = (long)global_params[PARAM_NUMBER];
long numRelation = (long)global_params[PARAM_RELATIONS];
long percentQuery = (long)global_params[PARAM_QUERIES];
long queryRange;
long percentUser = (long)global_params[PARAM_USER];
printf("Initializing clients... ");
fflush(stdout);
randomPtr = random_alloc();
assert(randomPtr != NULL);
clients = (client_t**)SEQ_MALLOC(numClient * sizeof(client_t*));
assert(clients != NULL);
numTransactionPerClient = (long)((double)numTransaction / (double)numClient + 0.5);
queryRange = (long)((double)percentQuery / 100.0 * (double)numRelation + 0.5);
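/*
* Worked example with the defaults above: t = 1 << 26 and c = 1 give
* 67108864 transactions for the single client, and q = 90 with
* r = 1 << 16 gives a query range of 58982 ids (0.90 * 65536,
* rounded to nearest).
*/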
for (i = 0; i < numClient; i++) {
clients[i] = client_alloc(i,
managerPtr,
numTransactionPerClient,
numQueryPerTransaction,
queryRange,
percentUser);
assert(clients[i] != NULL);
}
puts("done.");
printf(" Transactions = %li\n", numTransaction);
printf(" Clients = %li\n", numClient);
printf(" Transactions/client = %li\n", numTransactionPerClient);
printf(" Queries/transaction = %li\n", numQueryPerTransaction);
printf(" Relations = %li\n", numRelation);
printf(" Query percent = %li\n", percentQuery);
printf(" Query range = %li\n", queryRange);
printf(" Percent user = %li\n", percentUser);
fflush(stdout);
random_free(randomPtr);
return clients;
}
/* =============================================================================
* checkTables
* -- some simple checks (not comprehensive)
* -- dependent on tasks generated for clients in initializeClients()
* =============================================================================
*/
static void
checkTables (manager_t* managerPtr)
{
long i;
long numRelation = (long)global_params[PARAM_RELATIONS];
MAP_T* customerTablePtr = managerPtr->customerTablePtr;
MAP_T* tables[] = {
managerPtr->carTablePtr,
managerPtr->flightTablePtr,
managerPtr->roomTablePtr,
};
long numTable = sizeof(tables) / sizeof(tables[0]);
bool_t (*manager_add[])(manager_t*, long, long, long) = {
&manager_addCar_seq,
&manager_addFlight_seq,
&manager_addRoom_seq
};
long t;
printf("Checking tables... ");
fflush(stdout);
/* Check for unique customer IDs */
long percentQuery = (long)global_params[PARAM_QUERIES];
long queryRange = (long)((double)percentQuery / 100.0 * (double)numRelation + 0.5);
long maxCustomerId = queryRange + 1;
for (i = 1; i <= maxCustomerId; i++) {
if (MAP_FIND(customerTablePtr, i)) {
if (MAP_REMOVE(customerTablePtr, i)) {
assert(!MAP_FIND(customerTablePtr, i));
}
}
}
/* Check reservation tables for consistency and unique ids */
for (t = 0; t < numTable; t++) {
MAP_T* tablePtr = tables[t];
for (i = 1; i <= numRelation; i++) {
if (MAP_FIND(tablePtr, i)) {
assert(manager_add[t](managerPtr, i, 0, 0)); /* validate entry */
if (MAP_REMOVE(tablePtr, i)) {
assert(!MAP_REMOVE(tablePtr, i));
}
}
}
}
puts("done.");
fflush(stdout);
}
/* =============================================================================
* freeClients
* =============================================================================
*/
static void
freeClients (client_t** clients)
{
long i;
long numClient = (long)global_params[PARAM_CLIENTS];
for (i = 0; i < numClient; i++) {
client_t* clientPtr = clients[i];
client_free(clientPtr);
}
}
/* =============================================================================
* main
* =============================================================================
*/
MAIN(argc, argv)
{
manager_t* managerPtr;
client_t** clients;
TIMER_T start;
TIMER_T stop;
/* Initialization */
parseArgs(argc, (char* const*)argv);
SIM_GET_NUM_CPU(global_params[PARAM_CLIENTS]);
managerPtr = initializeManager();
assert(managerPtr != NULL);
clients = initializeClients(managerPtr);
assert(clients != NULL);
long numThread = global_params[PARAM_CLIENTS];
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
/* Run transactions */
printf("Running clients... ");
fflush(stdout);
GOTO_SIM();
TIMER_READ(start);
#ifdef OTM
#pragma omp parallel
{
client_run(clients);
}
#else
thread_start(client_run, (void*)clients);
#endif
TIMER_READ(stop);
GOTO_REAL();
puts("done.");
printf("Time = %0.6lf\n",
TIMER_DIFF_SECONDS(start, stop));
fflush(stdout);
checkTables(managerPtr);
/* Clean up */
printf("Deallocating memory... ");
fflush(stdout);
freeClients(clients);
/*
* TODO: The contents of the manager's table need to be deallocated.
*/
manager_free(managerPtr);
puts("done.");
fflush(stdout);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of vacation.c
*
* =============================================================================
*/
|
dcraw_lz.c | /*
* OpenMP multithread version of dcraw
* based on UFRaw dcraw_indi.c by Udi Fuchs
*/
/*
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2018 by Dave Coffin, dcoffin a cybercom o net
This is a command-line ANSI C program to convert raw photos from
any digital camera on any computer running any operating system.
No license is required to download and use dcraw.c. However,
to lawfully redistribute dcraw, you must either (a) offer, at
no extra charge, full source code* for all executable files
containing RESTRICTED functions, (b) distribute this code under
the GPL Version 2 or later, (c) remove all RESTRICTED functions,
re-implement them, or copy them from an earlier, unrestricted
Revision of dcraw.c, or (d) purchase a license from the author.
The functions that process Foveon images have been RESTRICTED
since Revision 1.237. All other code remains free for all uses.
*If you have not modified dcraw.c in any way, a link to my
homepage qualifies as "full source code".
$Revision: 1.478 $
$Date: 2018/06/01 20:36:25 $
*/
#define DCRAW_VERSION "9.28"
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#define _USE_MATH_DEFINES
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
#if defined(DJGPP) || defined(__MINGW32__) || (defined(_MSC_VER) && _MSC_VER <= 1900)
#define fseeko fseek
#define ftello ftell
#else
#define fgetc getc_unlocked
#endif
#ifdef __CYGWIN__
#include <io.h>
#endif
#ifdef WIN32
#include <sys/utime.h>
#include <winsock2.h>
#pragma comment(lib, "ws2_32.lib")
#define snprintf _snprintf
#define strcasecmp stricmp
#define strncasecmp strnicmp
typedef __int64 INT64;
typedef unsigned __int64 UINT64;
#else
#include <unistd.h>
#include <utime.h>
#include <netinet/in.h>
typedef long long INT64;
typedef unsigned long long UINT64;
#endif
#ifdef NODEPS
#define NO_JASPER
#define NO_JPEG
#define NO_LCMS
#endif
#ifndef NO_JASPER
#include <jasper/jasper.h> /* Decode Red camera movies */
#endif
#ifndef NO_JPEG
#include <jpeglib.h> /* Decode compressed Kodak DC120 photos */
#endif /* and Adobe Lossy DNGs */
#ifndef NO_LCMS
#include <lcms2.h> /* Support color profiles */
#endif
#ifdef LOCALEDIR
#include <libintl.h>
#define _(String) gettext(String)
#else
#define _(String) (String)
#endif
#if !defined(uchar)
#define uchar unsigned char
#endif
#if !defined(ushort)
#define ushort unsigned short
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
/*
All global variables are defined here, and all functions that
access them are prefixed with "CLASS". For thread-safety, all
non-const static local variables except cbrt[] must be declared
"thread_local".
*/
FILE *ifp, *ofp;
short order;
const char *ifname;
char *meta_data, xtrans[6][6], xtrans_abs[6][6];
char cdesc[5], desc[512], make[64], model[64], model2[64], artist[64];
float flash_used, canon_ev, iso_speed, shutter, aperture, focal_len;
time_t timestamp;
off_t strip_offset, data_offset;
off_t thumb_offset, meta_offset, profile_offset;
unsigned shot_order, kodak_cbpp, exif_cfa, unique_id;
unsigned thumb_length, meta_length, profile_length;
unsigned thumb_misc, *oprof, fuji_layout, shot_select=0, multi_out=0;
unsigned tiff_nifds, tiff_samples, tiff_bps, tiff_compress;
unsigned black, maximum, mix_green, raw_color, zero_is_bad;
unsigned zero_after_ff, is_raw, dng_version, is_foveon, data_error;
unsigned tile_width, tile_length, gpsdata[32], load_flags;
unsigned flip, tiff_flip, filters, colors;
ushort raw_height, raw_width, height, width, top_margin, left_margin;
ushort shrink, iheight, iwidth, fuji_width, thumb_width, thumb_height;
ushort *raw_image, (*image)[4], cblack[4102];
ushort white[8][8], curve[0x10000], cr2_slice[3], sraw_mul[4];
double pixel_aspect, aber[4]={1,1,1,1}, gamm[6]={ 0.45,4.5,0,0,0,0 };
float bright=1, user_mul[4]={0,0,0,0}, threshold=0;
int mask[8][4];
int half_size=0, four_color_rgb=0, document_mode=0, highlight=0;
int verbose=0, use_auto_wb=0, use_camera_wb=0, use_camera_matrix=1;
int output_color=1, output_bps=8, output_tiff=0, med_passes=0;
int no_auto_bright=0;
unsigned greybox[4] = { 0, 0, UINT_MAX, UINT_MAX };
float cam_mul[4], pre_mul[4], cmatrix[3][4], rgb_cam[3][4];
#ifndef LIGHTZONE
const
#endif
double xyz_rgb[3][3] = { /* XYZ from RGB */
{ 0.412453, 0.357580, 0.180423 },
{ 0.212671, 0.715160, 0.072169 },
{ 0.019334, 0.119193, 0.950227 } };
const float d65_white[3] = { 0.950456, 1, 1.088754 };
int histogram[4][0x2000];
void (*write_thumb)(), (*write_fun)();
void (*load_raw)(), (*thumb_load_raw)();
jmp_buf failure;
struct decode {
struct decode *branch[2];
int leaf;
} first_decode[2048], *second_decode, *free_decode;
struct tiff_ifd {
int width, height, bps, comp, phint, offset, flip, samples, bytes;
int tile_width, tile_length;
float shutter;
} tiff_ifd[10];
struct ph1 {
int format, key_off, tag_21a;
int black, split_col, black_col, split_row, black_row;
float tag_210;
} ph1;
#define CLASS
#define FORC(cnt) for (c=0; c < cnt; c++)
#define FORC3 FORC(3)
#define FORC4 FORC(4)
#define FORCC FORC(colors)
#define SQR(x) ((x)*(x))
#define ABS(x) (((int)(x) ^ ((int)(x) >> 31)) - ((int)(x) >> 31))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y))
#define CLIP(x) LIM((int)(x),0,65535)
#define SWAP(a,b) { a=a+b; b=a-b; a=a-b; }
/*
In order to inline this calculation, I make the risky
assumption that all filter patterns can be described
by a repeating pattern of eight rows and two columns.
Do not use the FC or BAYER macros with the Leaf CatchLight,
because its pattern is 16x16, not 2x8.
Return values are either 0/1/2/3 = G/M/C/Y or 0/1/2/3 = R/G1/B/G2
PowerShot 600 PowerShot A50 PowerShot Pro70 Pro90 & G1
0xe1e4e1e4: 0x1b4e4b1e: 0x1e4b4e1b: 0xb4b4b4b4:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 G M G M G M 0 C Y C Y C Y 0 Y C Y C Y C 0 G M G M G M
1 C Y C Y C Y 1 M G M G M G 1 M G M G M G 1 Y C Y C Y C
2 M G M G M G 2 Y C Y C Y C 2 C Y C Y C Y
3 C Y C Y C Y 3 G M G M G M 3 G M G M G M
4 C Y C Y C Y 4 Y C Y C Y C
PowerShot A5 5 G M G M G M 5 G M G M G M
0x1e4e1e4e: 6 Y C Y C Y C 6 C Y C Y C Y
7 M G M G M G 7 M G M G M G
0 1 2 3 4 5
0 C Y C Y C Y
1 G M G M G M
2 C Y C Y C Y
3 M G M G M G
All RGB cameras use one of these Bayer grids:
0x16161616: 0x61616161: 0x49494949: 0x94949494:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 B G B G B G 0 G R G R G R 0 G B G B G B 0 R G R G R G
1 G R G R G R 1 B G B G B G 1 R G R G R G 1 G B G B G B
2 B G B G B G 2 G R G R G R 2 G B G B G B 2 R G R G R G
3 G R G R G R 3 B G B G B G 3 R G R G R G 3 G B G B G B
*/
#define RAW(row,col) \
raw_image[(row)*raw_width+(col)]
#define FC(row,col) \
(filters >> ((((row) << 1 & 14) + ((col) & 1)) << 1) & 3)
#define BAYER(row,col) \
image[((row) >> shrink)*iwidth + ((col) >> shrink)][FC(row,col)]
#define BAYER2(row,col) \
image[((row) >> shrink)*iwidth + ((col) >> shrink)][fcol(row,col)]
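/*
Illustrative self-check for the FC macro (not in dcraw; the
FC_SELFTEST guard is hypothetical). With filters = 0x94949494 the
decoded 2x2 tile must be R G / G B, matching the rightmost Bayer
grid in the table above.
*/
#ifdef FC_SELFTEST
#include <assert.h>
static void CLASS fc_selftest()
{
unsigned saved = filters;
filters = 0x94949494;
assert (FC(0,0) == 0); /* shift 0: 0x94949494 & 3 = 0 (R) */
assert (FC(0,1) == 1); /* shift 2: -> 1 (G) */
assert (FC(1,0) == 1); /* shift 4: -> 1 (G) */
assert (FC(1,1) == 2); /* shift 6: -> 2 (B) */
filters = saved;
}
#endif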
int CLASS fcol (int row, int col)
{
static const char filter[16][16] =
{ { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 },
{ 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 },
{ 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 },
{ 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 },
{ 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 },
{ 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 },
{ 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 },
{ 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 },
{ 2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 },
{ 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 },
{ 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 },
{ 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 },
{ 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 },
{ 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 },
{ 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 },
{ 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } };
if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15];
if (filters == 9) return xtrans[(row+6) % 6][(col+6) % 6];
return FC(row,col);
}
#ifndef __GLIBC__
char *my_memmem (char *haystack, size_t haystacklen,
char *needle, size_t needlelen)
{
char *c;
for (c = haystack; c <= haystack + haystacklen - needlelen; c++)
if (!memcmp (c, needle, needlelen))
return c;
return 0;
}
#define memmem my_memmem
char *my_strcasestr (char *haystack, const char *needle)
{
char *c;
for (c = haystack; *c; c++)
if (!strncasecmp(c, needle, strlen(needle)))
return c;
return 0;
}
#define strcasestr my_strcasestr
#endif
void CLASS merror (void *ptr, const char *where)
{
if (ptr) return;
fprintf (stderr,_("%s: Out of memory in %s\n"), ifname, where);
longjmp (failure, 1);
}
void CLASS derror()
{
if (!data_error) {
fprintf (stderr, "%s: ", ifname);
if (feof(ifp))
fprintf (stderr,_("Unexpected end of file\n"));
else
fprintf (stderr,_("Corrupt data near 0x%llx\n"), (INT64) ftello(ifp));
}
data_error++;
}
ushort CLASS sget2 (uchar *s)
{
if (order == 0x4949) /* "II" means little-endian */
return s[0] | s[1] << 8;
else /* "MM" means big-endian */
return s[0] << 8 | s[1];
}
ushort CLASS get2()
{
uchar str[2] = { 0xff,0xff };
fread (str, 1, 2, ifp);
return sget2(str);
}
unsigned CLASS sget4 (uchar *s)
{
if (order == 0x4949)
return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24;
else
return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3];
}
#define sget4(s) sget4((uchar *)s)
unsigned CLASS get4()
{
uchar str[4] = { 0xff,0xff,0xff,0xff };
fread (str, 1, 4, ifp);
return sget4(str);
}
unsigned CLASS getint (int type)
{
return type == 3 ? get2() : get4();
}
float CLASS int_to_float (int i)
{
union { int i; float f; } u;
u.i = i;
return u.f;
}
double CLASS getreal (int type)
{
union { char c[8]; double d; } u;
int i, rev;
switch (type) {
case 3: return (unsigned short) get2();
case 4: return (unsigned int) get4();
case 5: u.d = (unsigned int) get4();
return u.d / (unsigned int) get4();
case 8: return (signed short) get2();
case 9: return (signed int) get4();
case 10: u.d = (signed int) get4();
return u.d / (signed int) get4();
case 11: return int_to_float (get4());
case 12:
rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234));
for (i=0; i < 8; i++)
u.c[i ^ rev] = fgetc(ifp);
return u.d;
default: return fgetc(ifp);
}
}
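/*
For reference, the TIFF 6.0 field types handled above are:
3 = SHORT, 4 = LONG, 5 = RATIONAL, 8 = SSHORT, 9 = SLONG,
10 = SRATIONAL, 11 = FLOAT, 12 = DOUBLE. For type 12 the "rev"
XOR reverses the eight bytes whenever the file's byte order and
the host's byte order disagree.
*/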
void CLASS read_shorts (ushort *pixel, int count)
{
if (fread (pixel, 2, count, ifp) < count) derror();
if ((order == 0x4949) == (ntohs(0x1234) == 0x1234))
swab (pixel, pixel, count*2);
}
void CLASS cubic_spline (const int *x_, const int *y_, const int len)
{
float **A, *b, *c, *d, *x, *y;
int i, j;
A = (float **) calloc (((2*len + 4)*sizeof **A + sizeof *A), 2*len);
if (!A) return;
A[0] = (float *) (A + 2*len);
for (i = 1; i < 2*len; i++)
A[i] = A[0] + 2*len*i;
y = len + (x = i + (d = i + (c = i + (b = A[0] + i*i))));
for (i = 0; i < len; i++) {
x[i] = x_[i] / 65535.0;
y[i] = y_[i] / 65535.0;
}
for (i = len-1; i > 0; i--) {
b[i] = (y[i] - y[i-1]) / (x[i] - x[i-1]);
d[i-1] = x[i] - x[i-1];
}
for (i = 1; i < len-1; i++) {
A[i][i] = 2 * (d[i-1] + d[i]);
if (i > 1) {
A[i][i-1] = d[i-1];
A[i-1][i] = d[i-1];
}
A[i][len-1] = 6 * (b[i+1] - b[i]);
}
for(i = 1; i < len-2; i++) {
float v = A[i+1][i] / A[i][i];
for(j = 1; j <= len-1; j++)
A[i+1][j] -= v * A[i][j];
}
for(i = len-2; i > 0; i--) {
float acc = 0;
for(j = i; j <= len-2; j++)
acc += A[i][j]*c[j];
c[i] = (A[i][len-1] - acc) / A[i][i];
}
for (i = 0; i < 0x10000; i++) {
float x_out = (float)(i / 65535.0);
float y_out = 0;
for (j = 0; j < len-1; j++) {
if (x[j] <= x_out && x_out <= x[j+1]) {
float v = x_out - x[j];
y_out = y[j] +
((y[j+1] - y[j]) / d[j] - (2 * d[j] * c[j] + c[j+1] * d[j])/6) * v
+ (c[j] * 0.5) * v*v + ((c[j+1] - c[j]) / (6 * d[j])) * v*v*v;
}
}
curve[i] = y_out < 0.0 ? 0 : (y_out >= 1.0 ? 65535 :
(ushort)(y_out * 65535.0 + 0.5));
}
free (A);
}
void CLASS canon_600_fixed_wb (int temp)
{
static const short mul[4][5] = {
{ 667, 358,397,565,452 },
{ 731, 390,367,499,517 },
{ 1119, 396,348,448,537 },
{ 1399, 485,431,508,688 } };
int lo, hi, i;
float frac=0;
for (lo=4; --lo; )
if (*mul[lo] <= temp) break;
for (hi=0; hi < 3; hi++)
if (*mul[hi] >= temp) break;
if (lo != hi)
frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]);
for (i=1; i < 5; i++)
pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]);
}
/* Return values: 0 = white 1 = near white 2 = not white */
int CLASS canon_600_color (int ratio[2], int mar)
{
int clipped=0, target, miss;
if (flash_used) {
if (ratio[1] < -104)
{ ratio[1] = -104; clipped = 1; }
if (ratio[1] > 12)
{ ratio[1] = 12; clipped = 1; }
} else {
if (ratio[1] < -264 || ratio[1] > 461) return 2;
if (ratio[1] < -50)
{ ratio[1] = -50; clipped = 1; }
if (ratio[1] > 307)
{ ratio[1] = 307; clipped = 1; }
}
target = flash_used || ratio[1] < 197
? -38 - (398 * ratio[1] >> 10)
: -123 + (48 * ratio[1] >> 10);
if (target - mar <= ratio[0] &&
target + 20 >= ratio[0] && !clipped) return 0;
miss = target - ratio[0];
if (abs(miss) >= mar*4) return 2;
if (miss < -20) miss = -20;
if (miss > mar) miss = mar;
ratio[0] = target - miss;
return 1;
}
void CLASS canon_600_auto_wb()
{
int mar, row, col, i, j, st, count[] = { 0,0 };
int test[8], total[2][8], ratio[2][2], stat[2];
memset (&total, 0, sizeof total);
i = canon_ev + 0.5;
if (i < 10) mar = 150;
else if (i > 12) mar = 20;
else mar = 280 - 20 * i;
if (flash_used) mar = 80;
for (row=14; row < height-14; row+=4)
for (col=10; col < width; col+=2) {
for (i=0; i < 8; i++)
test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] =
BAYER(row+(i >> 1),col+(i & 1));
for (i=0; i < 8; i++)
if (test[i] < 150 || test[i] > 1500) goto next;
for (i=0; i < 4; i++)
if (abs(test[i] - test[i+4]) > 50) goto next;
for (i=0; i < 2; i++) {
for (j=0; j < 4; j+=2)
ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j];
stat[i] = canon_600_color (ratio[i], mar);
}
if ((st = stat[0] | stat[1]) > 1) goto next;
for (i=0; i < 2; i++)
if (stat[i])
for (j=0; j < 2; j++)
test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10;
for (i=0; i < 8; i++)
total[st][i] += test[i];
count[st]++;
next: ;
}
if (count[0] | count[1]) {
st = count[0]*200 < count[1];
for (i=0; i < 4; i++)
pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]);
}
}
void CLASS canon_600_coeff()
{
static const short table[6][12] = {
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 },
{ -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 },
{ -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 },
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } };
int t=0, i, c;
float mc, yc;
mc = pre_mul[1] / pre_mul[2];
yc = pre_mul[3] / pre_mul[2];
if (mc > 1 && mc <= 1.28 && yc < 0.8789) t=1;
if (mc > 1.28 && mc <= 2) {
if (yc < 0.8789) t=3;
else if (yc <= 2) t=4;
}
if (flash_used) t=5;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0;
}
void CLASS canon_600_load_raw()
{
uchar data[1120], *dp;
ushort *pix;
int irow, row;
for (irow=row=0; irow < height; irow++) {
if (fread (data, 1, 1120, ifp) < 1120) derror();
pix = raw_image + row*raw_width;
for (dp=data; dp < data+1120; dp+=10, pix+=8) {
pix[0] = (dp[0] << 2) + (dp[1] >> 6 );
pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3);
pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3);
pix[3] = (dp[4] << 2) + (dp[1] & 3);
pix[4] = (dp[5] << 2) + (dp[9] & 3);
pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3);
pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3);
pix[7] = (dp[8] << 2) + (dp[9] >> 6 );
}
if ((row+=2) > height) row = 1;
}
}
void CLASS canon_600_correct()
{
int row, col, val;
static const short mul[4][2] =
{ { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } };
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
if ((val = BAYER(row,col) - black) < 0) val = 0;
val = val * mul[row & 3][col & 1] >> 9;
BAYER(row,col) = val;
}
canon_600_fixed_wb(1311);
canon_600_auto_wb();
canon_600_coeff();
maximum = (0x3ff - black) * 1109 >> 9;
black = 0;
}
int CLASS canon_s2is()
{
unsigned row;
for (row=0; row < 100; row++) {
fseek (ifp, row*3340 + 3284, SEEK_SET);
if (getc(ifp) > 15) return 1;
}
return 0;
}
unsigned CLASS getbithuff (int nbits, ushort *huff)
{
static unsigned bitbuf=0;
static int vbits=0, reset=0;
unsigned c;
if (nbits > 25) return 0;
if (nbits < 0)
return bitbuf = vbits = reset = 0;
if (nbits == 0 || vbits < 0) return 0;
while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF &&
!(reset = zero_after_ff && c == 0xff && fgetc(ifp))) {
bitbuf = (bitbuf << 8) + (uchar) c;
vbits += 8;
}
c = bitbuf << (32-vbits) >> (32-nbits);
if (huff) {
vbits -= huff[c] >> 8;
c = (uchar) huff[c];
} else
vbits -= nbits;
if (vbits < 0) derror();
return c;
}
#define getbits(n) getbithuff(n,0)
#define gethuff(h) getbithuff(*h,h+1)
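/*
gethuff(h) peeks h[0] bits -- the length of the longest code --
looks that window up in the table starting at h+1, and getbithuff()
then consumes only huff[c] >> 8 bits, the true code length stored
in each entry's high byte. A worked example follows make_decoder()
below.
*/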
/*
Construct a decode tree according to the specification in *source.
The first 16 bytes specify how many codes should be 1-bit, 2-bit,
3-bit, etc. Bytes after that are the leaf values.
For example, if the source is
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
then the code is
00 0x04
010 0x03
011 0x05
100 0x06
101 0x02
1100 0x07
1101 0x01
11100 0x08
11101 0x09
11110 0x00
111110 0x0a
1111110 0x0b
1111111 0xff
*/
ushort * CLASS make_decoder_ref (const uchar **source)
{
int max, len, h, i, j;
const uchar *count;
ushort *huff;
count = (*source += 16) - 17;
for (max=16; max && !count[max]; max--);
huff = (ushort *) calloc (1 + (1 << max), sizeof *huff);
merror (huff, "make_decoder()");
huff[0] = max;
for (h=len=1; len <= max; len++)
for (i=0; i < count[len]; i++, ++*source)
for (j=0; j < 1 << (max-len); j++)
if (h <= 1 << max)
huff[h++] = len << 8 | **source;
return huff;
}
ushort * CLASS make_decoder (const uchar *source)
{
return make_decoder_ref (&source);
}
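/*
Illustrative sketch (not in dcraw; the DECODER_EXAMPLE guard is
hypothetical) of the flat table built above for the sample tree in
the earlier comment: the longest code is 7 bits, so huff[0] == 7 and
1 << 7 window entries follow. Every window whose top three bits are
the code 010 (leaf 0x03) stores (3 << 8) | 0x03, giving gethuff()
both the leaf value and the number of bits to consume.
*/
#ifdef DECODER_EXAMPLE
static void CLASS decoder_example()
{
static const uchar src[] =
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff };
ushort *huff = make_decoder (src);
printf ("max code length: %d\n", huff[0]); /* prints 7 */
printf ("window 0x20 -> leaf 0x%02x, %d bits\n",
(uchar) huff[1+0x20], huff[1+0x20] >> 8); /* 0x03, 3 */
free (huff);
}
#endif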
void CLASS crw_init_tables (unsigned table, ushort *huff[2])
{
static const uchar first_tree[3][29] = {
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
{ 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0,
0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff },
{ 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0,
0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff },
};
static const uchar second_tree[3][180] = {
{ 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139,
0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08,
0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0,
0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42,
0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57,
0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9,
0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98,
0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6,
0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4,
0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7,
0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1,
0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64,
0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba,
0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4,
0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff },
{ 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140,
0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06,
0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32,
0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51,
0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26,
0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59,
0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9,
0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99,
0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85,
0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8,
0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a,
0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9,
0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8,
0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8,
0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff },
{ 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117,
0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08,
0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22,
0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34,
0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41,
0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48,
0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69,
0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8,
0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94,
0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a,
0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6,
0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62,
0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5,
0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3,
0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff }
};
if (table > 2) table = 2;
huff[0] = make_decoder ( first_tree[table]);
huff[1] = make_decoder (second_tree[table]);
}
/*
Return 0 if the image starts with compressed data,
1 if it starts with uncompressed low-order bits.
In Canon compressed data, 0xff is always followed by 0x00.
*/
int CLASS canon_has_lowbits()
{
uchar test[0x4000];
int ret=1, i;
fseek (ifp, 0, SEEK_SET);
fread (test, 1, sizeof test, ifp);
for (i=540; i < sizeof test - 1; i++)
if (test[i] == 0xff) {
if (test[i+1]) return 1;
ret=0;
}
return ret;
}
void CLASS canon_load_raw()
{
ushort *pixel, *prow, *huff[2];
int nblocks, lowbits, i, c, row, r, save, val;
int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2];
crw_init_tables (tiff_compress, huff);
lowbits = canon_has_lowbits();
if (!lowbits) maximum = 0x3ff;
fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET);
zero_after_ff = 1;
getbits(-1);
for (row=0; row < raw_height; row+=8) {
pixel = raw_image + row*raw_width;
nblocks = MIN (8, raw_height-row) * raw_width >> 6;
for (block=0; block < nblocks; block++) {
memset (diffbuf, 0, sizeof diffbuf);
for (i=0; i < 64; i++ ) {
leaf = gethuff(huff[i > 0]);
if (leaf == 0 && i) break;
if (leaf == 0xff) continue;
i += leaf >> 4;
len = leaf & 15;
if (len == 0) continue;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
if (i < 64) diffbuf[i] = diff;
}
diffbuf[0] += carry;
carry = diffbuf[0];
for (i=0; i < 64; i++ ) {
if (pnum++ % raw_width == 0)
base[0] = base[1] = 512;
if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10)
derror();
}
}
if (lowbits) {
save = ftell(ifp);
fseek (ifp, 26 + row*raw_width/4, SEEK_SET);
for (prow=pixel, i=0; i < raw_width*2; i++) {
c = fgetc(ifp);
for (r=0; r < 8; r+=2, prow++) {
val = (*prow << 2) + ((c >> r) & 3);
if (raw_width == 2672 && val < 512) val += 2;
*prow = val;
}
}
fseek (ifp, save, SEEK_SET);
}
}
FORC(2) free (huff[c]);
}
struct jhead {
int algo, bits, high, wide, clrs, sraw, psv, restart, vpred[6];
ushort quant[64], idct[64], *huff[20], *free[20], *row;
};
int CLASS ljpeg_start (struct jhead *jh, int info_only)
{
ushort c, tag, len;
uchar data[0x10000];
const uchar *dp;
memset (jh, 0, sizeof *jh);
jh->restart = INT_MAX;
if ((fgetc(ifp),fgetc(ifp)) != 0xd8) return 0;
do {
if (!fread (data, 2, 2, ifp)) return 0;
tag = data[0] << 8 | data[1];
len = (data[2] << 8 | data[3]) - 2;
if (tag <= 0xff00) return 0;
fread (data, 1, len, ifp);
switch (tag) {
case 0xffc3:
jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
case 0xffc1:
case 0xffc0:
jh->algo = tag & 0xff;
jh->bits = data[0];
jh->high = data[1] << 8 | data[2];
jh->wide = data[3] << 8 | data[4];
jh->clrs = data[5] + jh->sraw;
if (len == 9 && !dng_version) getc(ifp);
break;
case 0xffc4:
if (info_only) break;
for (dp = data; dp < data+len && !((c = *dp++) & -20); )
jh->free[c] = jh->huff[c] = make_decoder_ref (&dp);
break;
case 0xffda:
jh->psv = data[1+data[0]*2];
jh->bits -= data[3+data[0]*2] & 15;
break;
case 0xffdb:
FORC(64) jh->quant[c] = data[c*2+1] << 8 | data[c*2+2];
break;
case 0xffdd:
jh->restart = data[0] << 8 | data[1];
}
} while (tag != 0xffda);
if (jh->bits > 16 || jh->clrs > 6 ||
!jh->bits || !jh->high || !jh->wide || !jh->clrs) return 0;
if (info_only) return 1;
if (!jh->huff[0]) return 0;
FORC(19) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c];
if (jh->sraw) {
FORC(4) jh->huff[2+c] = jh->huff[1];
FORC(jh->sraw) jh->huff[1+c] = jh->huff[0];
}
jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4);
merror (jh->row, "ljpeg_start()");
return zero_after_ff = 1;
}
void CLASS ljpeg_end (struct jhead *jh)
{
int c;
FORC4 if (jh->free[c]) free (jh->free[c]);
free (jh->row);
}
int CLASS ljpeg_diff (ushort *huff)
{
int len, diff;
len = gethuff(huff);
if (len == 16 && (!dng_version || dng_version >= 0x1010000))
return -32768;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
return diff;
}
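/*
The sign extension above is JPEG's EXTEND() step: an SSSS-bit
magnitude whose top bit is clear encodes a negative difference.
For len == 3, raw bits 110 decode to +6, while 010 decodes to
2 - 7 = -5; together the 3-bit codes cover [-7,-4] and [4,7].
*/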
ushort * CLASS ljpeg_row (int jrow, struct jhead *jh)
{
int col, c, diff, pred, spred=0;
ushort mark=0, *row[3];
if (jrow * jh->wide % jh->restart == 0) {
FORC(6) jh->vpred[c] = 1 << (jh->bits-1);
if (jrow) {
fseek (ifp, -2, SEEK_CUR);
do mark = (mark << 8) + (c = fgetc(ifp));
while (c != EOF && mark >> 4 != 0xffd);
}
getbits(-1);
}
FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1);
for (col=0; col < jh->wide; col++)
FORC(jh->clrs) {
diff = ljpeg_diff (jh->huff[c]);
if (jh->sraw && c <= jh->sraw && (col | c))
pred = spred;
else if (col) pred = row[0][-jh->clrs];
else pred = (jh->vpred[c] += diff) - diff;
if (jrow && col) switch (jh->psv) {
case 1: break;
case 2: pred = row[1][0]; break;
case 3: pred = row[1][-jh->clrs]; break;
case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break;
case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break;
case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1); break;
case 7: pred = (pred + row[1][0]) >> 1; break;
default: pred = 0;
}
if ((**row = pred + diff) >> jh->bits) derror();
if (c <= jh->sraw) spred = **row;
row[0]++; row[1]++;
}
return row[2];
}
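/*
The psv switch above is the standard lossless-JPEG predictor menu
(Ra = left, Rb = above, Rc = above-left): 1 Ra, 2 Rb, 3 Rc,
4 Ra+Rb-Rc, 5 Ra+((Rb-Rc)>>1), 6 Rb+((Ra-Rc)>>1), 7 (Ra+Rb)>>1.
*/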
void CLASS lossless_jpeg_load_raw()
{
int jwide, jrow, jcol, val, jidx, i, j, row=0, col=0;
struct jhead jh;
ushort *rp;
if (!ljpeg_start (&jh, 0)) return;
jwide = jh.wide * jh.clrs;
for (jrow=0; jrow < jh.high; jrow++) {
rp = ljpeg_row (jrow, &jh);
if (load_flags & 1)
row = jrow & 1 ? height-1-jrow/2 : jrow/2;
for (jcol=0; jcol < jwide; jcol++) {
val = curve[*rp++];
if (cr2_slice[0]) {
jidx = jrow*jwide + jcol;
i = jidx / (cr2_slice[1]*raw_height);
if ((j = i >= cr2_slice[0]))
i = cr2_slice[0];
jidx -= i * (cr2_slice[1]*raw_height);
row = jidx / cr2_slice[1+j];
col = jidx % cr2_slice[1+j] + i*cr2_slice[1];
}
if (raw_width == 3984 && (col -= 2) < 0)
col += (row--,raw_width);
if ((unsigned) row < raw_height) RAW(row,col) = val;
if (++col >= raw_width)
col = (row++,0);
}
}
ljpeg_end (&jh);
}
void CLASS canon_sraw_load_raw()
{
struct jhead jh;
short *rp=0, (*ip)[4];
int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c;
int v[3]={0,0,0}, ver, hue;
char *cp;
if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return;
jwide = (jh.wide >>= 1) * jh.clrs;
for (ecol=slice=0; slice <= cr2_slice[0]; slice++) {
scol = ecol;
ecol += cr2_slice[1] * 2 / jh.clrs;
if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2;
for (row=0; row < height; row += (jh.clrs >> 1) - 1) {
ip = (short (*)[4]) image + row*width;
for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) {
if ((jcol %= jwide) == 0)
rp = (short *) ljpeg_row (jrow++, &jh);
if (col >= width) continue;
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 16384;
ip[col][2] = rp[jcol+jh.clrs-1] - 16384;
}
}
}
for (cp=model2; *cp && !isdigit(*cp); cp++);
sscanf (cp, "%d.%d.%d", v, v+1, v+2);
ver = (v[0]*1000 + v[1])*1000 + v[2];
hue = (jh.sraw+1) << 2;
if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006))
hue = jh.sraw << 1;
ip = (short (*)[4]) image;
rp = ip[0];
for (row=0; row < height; row++, ip+=width) {
if (row & (jh.sraw >> 1))
for (col=0; col < width; col+=2)
for (c=1; c < 3; c++)
if (row == height-1)
ip[col][c] = ip[col-width][c];
else ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1;
for (col=1; col < width; col+=2)
for (c=1; c < 3; c++)
if (col == width-1)
ip[col][c] = ip[col-1][c];
else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1;
}
for ( ; rp < ip[0]; rp+=4) {
if (unique_id == 0x80000218 ||
unique_id == 0x80000250 ||
unique_id == 0x80000261 ||
unique_id == 0x80000281 ||
unique_id == 0x80000287) {
rp[1] = (rp[1] << 2) + hue;
rp[2] = (rp[2] << 2) + hue;
pix[0] = rp[0] + (( 50*rp[1] + 22929*rp[2]) >> 14);
pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14);
pix[2] = rp[0] + ((29040*rp[1] - 101*rp[2]) >> 14);
} else {
if (unique_id < 0x80000218) rp[0] -= 512;
pix[0] = rp[0] + rp[2];
pix[2] = rp[0] + rp[1];
pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12);
}
FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10);
}
ljpeg_end (&jh);
maximum = 0x3fff;
}
void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp)
{
int c;
if (tiff_samples == 2 && shot_select) (*rp)++;
if (raw_image) {
if (row < raw_height && col < raw_width)
RAW(row,col) = curve[**rp];
*rp += tiff_samples;
} else {
if (row < height && col < width)
FORC(tiff_samples)
image[row*width+col][c] = curve[(*rp)[c]];
*rp += tiff_samples;
}
if (tiff_samples == 2 && shot_select) (*rp)--;
}
void CLASS ljpeg_idct (struct jhead *jh)
{
int c, i, j, len, skip, coef;
float work[3][8][8];
static float cs[106] = { 0 };
static const uchar zigzag[80] =
{ 0, 1, 8,16, 9, 2, 3,10,17,24,32,25,18,11, 4, 5,12,19,26,33,
40,48,41,34,27,20,13, 6, 7,14,21,28,35,42,49,56,57,50,43,36,
29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,
47,55,62,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63 };
if (!cs[0])
FORC(106) cs[c] = cos((c & 31)*M_PI/16)/2;
memset (work, 0, sizeof work);
work[0][0][0] = jh->vpred[0] += ljpeg_diff (jh->huff[0]) * jh->quant[0];
for (i=1; i < 64; i++ ) {
len = gethuff (jh->huff[16]);
i += skip = len >> 4;
if (!(len &= 15) && skip < 15) break;
coef = getbits(len);
if ((coef & (1 << (len-1))) == 0)
coef -= (1 << len) - 1;
((float *)work)[zigzag[i]] = coef * jh->quant[i];
}
FORC(8) work[0][0][c] *= M_SQRT1_2;
FORC(8) work[0][c][0] *= M_SQRT1_2;
for (i=0; i < 8; i++)
for (j=0; j < 8; j++)
FORC(8) work[1][i][j] += work[0][i][c] * cs[(j*2+1)*c];
for (i=0; i < 8; i++)
for (j=0; j < 8; j++)
FORC(8) work[2][i][j] += work[1][c][j] * cs[(i*2+1)*c];
FORC(64) jh->idct[c] = CLIP(((float *)work[2])[c]+0.5);
}
void CLASS lossless_dng_load_raw()
{
unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col, i, j;
struct jhead jh;
ushort *rp;
while (trow < raw_height) {
save = ftell(ifp);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
if (!ljpeg_start (&jh, 0)) break;
jwide = jh.wide;
if (filters) jwide *= jh.clrs;
jwide /= MIN (is_raw, tiff_samples);
switch (jh.algo) {
case 0xc1:
jh.vpred[0] = 16384;
getbits(-1);
for (jrow=0; jrow+7 < jh.high; jrow += 8) {
for (jcol=0; jcol+7 < jh.wide; jcol += 8) {
ljpeg_idct (&jh);
rp = jh.idct;
row = trow + jcol/tile_width + jrow*2;
col = tcol + jcol%tile_width;
for (i=0; i < 16; i+=2)
for (j=0; j < 8; j++)
adobe_copy_pixel (row+i, col+j, &rp);
}
}
break;
case 0xc3:
for (row=col=jrow=0; jrow < jh.high; jrow++) {
rp = ljpeg_row (jrow, &jh);
for (jcol=0; jcol < jwide; jcol++) {
adobe_copy_pixel (trow+row, tcol+col, &rp);
if (++col >= tile_width || col >= raw_width)
row += 1 + (col = 0);
}
}
}
fseek (ifp, save+4, SEEK_SET);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
ljpeg_end (&jh);
}
}
void CLASS packed_dng_load_raw()
{
ushort *pixel, *rp;
int row, col;
pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel);
merror (pixel, "packed_dng_load_raw()");
for (row=0; row < raw_height; row++) {
if (tiff_bps == 16)
read_shorts (pixel, raw_width * tiff_samples);
else {
getbits(-1);
for (col=0; col < raw_width * tiff_samples; col++)
pixel[col] = getbits(tiff_bps);
}
for (rp=pixel, col=0; col < raw_width; col++)
adobe_copy_pixel (row, col, &rp);
}
free (pixel);
}
void CLASS pentax_load_raw()
{
ushort bit[2][15], huff[4097];
int dep, row, col, diff, c, i;
ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2];
fseek (ifp, meta_offset, SEEK_SET);
dep = (get2() + 12) & 15;
fseek (ifp, 12, SEEK_CUR);
FORC(dep) bit[0][c] = get2();
FORC(dep) bit[1][c] = fgetc(ifp);
FORC(dep)
for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); )
huff[++i] = bit[1][c] << 8 | c;
huff[0] = 12;
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width; col++) {
diff = ljpeg_diff (huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
RAW(row,col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps) derror();
}
}
void CLASS nikon_load_raw()
{
static const uchar nikon_tree[][32] = {
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy */
5,4,3,6,2,7,1,0,8,9,11,10,12 },
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy after split */
0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 },
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, /* 12-bit lossless */
5,4,6,3,7,2,8,1,9,0,10,11,12 },
{ 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0, /* 14-bit lossy */
5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 },
{ 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0, /* 14-bit lossy after split */
8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 },
{ 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0, /* 14-bit lossless */
7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } };
ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize;
int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff;
fseek (ifp, meta_offset, SEEK_SET);
ver0 = fgetc(ifp);
ver1 = fgetc(ifp);
if (ver0 == 0x49 || ver1 == 0x58)
fseek (ifp, 2110, SEEK_CUR);
if (ver0 == 0x46) tree = 2;
if (tiff_bps == 14) tree += 3;
read_shorts (vpred[0], 4);
max = 1 << tiff_bps & 0x7fff;
if ((csize = get2()) > 1)
step = max / (csize-1);
if (ver0 == 0x44 && ver1 == 0x20 && step > 0) {
for (i=0; i < csize; i++)
curve[i*step] = get2();
for (i=0; i < max; i++)
curve[i] = ( curve[i-i%step]*(step-i%step) +
curve[i-i%step+step]*(i%step) ) / step;
fseek (ifp, meta_offset+562, SEEK_SET);
split = get2();
} else if (ver0 != 0x46 && csize <= 0x4001)
read_shorts (curve, max=csize);
while (curve[max-2] == curve[max-1]) max--;
huff = make_decoder (nikon_tree[tree]);
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
for (min=row=0; row < height; row++) {
if (split && row == split) {
free (huff);
huff = make_decoder (nikon_tree[tree+1]);
max += (min = 16) << 1;
}
for (col=0; col < raw_width; col++) {
i = gethuff(huff);
len = i & 15;
shl = i >> 4;
diff = ((getbits(len-shl) << 1) + 1) << shl >> 1;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - !shl;
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
if ((ushort)(hpred[col & 1] + min) >= max) derror();
RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)];
}
}
free (huff);
}
void CLASS nikon_yuv_load_raw()
{
int row, col, yuv[4], rgb[3], b, c;
UINT64 bitbuf=0;
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width; col++) {
if (!(b = col & 1)) {
bitbuf = 0;
FORC(6) bitbuf |= (UINT64) fgetc(ifp) << c*8;
FORC(4) yuv[c] = (bitbuf >> c*12 & 0xfff) - (c >> 1 << 11);
}
rgb[0] = yuv[b] + 1.370705*yuv[3];
rgb[1] = yuv[b] - 0.337633*yuv[2] - 0.698001*yuv[3];
rgb[2] = yuv[b] + 1.732446*yuv[2];
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,0xfff)] / cam_mul[c];
}
}
/*
Returns 1 for a Coolpix 995, 0 for anything else.
*/
int CLASS nikon_e995()
{
int i, histo[256];
const uchar often[] = { 0x00, 0x55, 0xaa, 0xff };
memset (histo, 0, sizeof histo);
fseek (ifp, -2000, SEEK_END);
for (i=0; i < 2000; i++)
histo[fgetc(ifp)]++;
for (i=0; i < 4; i++)
if (histo[often[i]] < 200)
return 0;
return 1;
}
/*
Returns 1 for a Coolpix 2100, 0 for anything else.
*/
int CLASS nikon_e2100()
{
uchar t[12];
int i;
fseek (ifp, 0, SEEK_SET);
for (i=0; i < 1024; i++) {
fread (t, 1, 12, ifp);
if (((t[2] & t[4] & t[7] & t[9]) >> 4
& t[1] & t[6] & t[8] & t[11] & 3) != 3)
return 0;
}
return 1;
}
void CLASS nikon_3700()
{
int bits, i;
uchar dp[24];
static const struct {
int bits;
char make[12], model[15];
} table[] = {
{ 0x00, "Pentax", "Optio 33WR" },
{ 0x03, "Nikon", "E3200" },
{ 0x32, "Nikon", "E3700" },
{ 0x33, "Olympus", "C740UZ" } };
fseek (ifp, 3072, SEEK_SET);
fread (dp, 1, 24, ifp);
bits = (dp[8] & 3) << 4 | (dp[20] & 3);
for (i=0; i < sizeof table / sizeof *table; i++)
if (bits == table[i].bits) {
strcpy (make, table[i].make );
strcpy (model, table[i].model);
}
}
/*
Separates a Minolta DiMAGE Z2 from a Nikon E4300.
*/
int CLASS minolta_z2()
{
int i, nz;
char tail[424];
fseek (ifp, -sizeof tail, SEEK_END);
fread (tail, 1, sizeof tail, ifp);
for (nz=i=0; i < sizeof tail; i++)
if (tail[i]) nz++;
return nz > 20;
}
void CLASS jpeg_thumb();
void CLASS ppm_thumb()
{
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) malloc (thumb_length);
merror (thumb, "ppm_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fread (thumb, 1, thumb_length, ifp);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS ppm16_thumb()
{
int i;
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) calloc (thumb_length, 2);
merror (thumb, "ppm16_thumb()");
read_shorts ((ushort *) thumb, thumb_length);
for (i=0; i < thumb_length; i++)
thumb[i] = ((ushort *) thumb)[i] >> 8;
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS layer_thumb()
{
int i, c;
char *thumb, map[][4] = { "012","102" };
colors = thumb_misc >> 5 & 7;
thumb_length = thumb_width*thumb_height;
thumb = (char *) calloc (colors, thumb_length);
merror (thumb, "layer_thumb()");
fprintf (ofp, "P%d\n%d %d\n255\n",
5 + (colors >> 1), thumb_width, thumb_height);
fread (thumb, thumb_length, colors, ifp);
for (i=0; i < thumb_length; i++)
FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp);
free (thumb);
}
void CLASS rollei_thumb()
{
unsigned i;
ushort *thumb;
thumb_length = thumb_width * thumb_height;
thumb = (ushort *) calloc (thumb_length, 2);
merror (thumb, "rollei_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
read_shorts (thumb, thumb_length);
for (i=0; i < thumb_length; i++) {
putc (thumb[i] << 3, ofp);
putc (thumb[i] >> 5 << 2, ofp);
putc (thumb[i] >> 11 << 3, ofp);
}
free (thumb);
}
void CLASS rollei_load_raw()
{
uchar pixel[10];
unsigned iten=0, isix, i, buffer=0, todo[16];
isix = raw_width * raw_height * 5 / 8;
while (fread (pixel, 1, 10, ifp) == 10) {
for (i=0; i < 10; i+=2) {
todo[i] = iten++;
todo[i+1] = pixel[i] << 8 | pixel[i+1];
buffer = pixel[i] >> 2 | buffer << 6;
}
for ( ; i < 16; i+=2) {
todo[i] = isix++;
todo[i+1] = buffer >> (14-i)*5;
}
for (i=0; i < 16; i+=2)
raw_image[todo[i]] = (todo[i+1] & 0x3ff);
}
maximum = 0x3ff;
}
int CLASS raw (unsigned row, unsigned col)
{
return (row < raw_height && col < raw_width) ? RAW(row,col) : 0;
}
void CLASS phase_one_flat_field (int is_float, int nc)
{
ushort head[8];
unsigned wide, high, y, x, c, rend, cend, row, col;
float *mrow, num, mult[4];
read_shorts (head, 8);
if (head[2] * head[3] * head[4] * head[5] == 0) return;
wide = head[2] / head[4] + (head[2] % head[4] != 0);
high = head[3] / head[5] + (head[3] % head[5] != 0);
mrow = (float *) calloc (nc*wide, sizeof *mrow);
merror (mrow, "phase_one_flat_field()");
for (y=0; y < high; y++) {
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2) {
num = is_float ? getreal(11) : get2()/32768.0;
if (y==0) mrow[c*wide+x] = num;
else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5];
}
if (y==0) continue;
rend = head[1] + y*head[5];
for (row = rend-head[5];
row < raw_height && row < rend &&
row < head[1]+head[3]-head[5]; row++) {
for (x=1; x < wide; x++) {
for (c=0; c < nc; c+=2) {
mult[c] = mrow[c*wide+x-1];
mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4];
}
cend = head[0] + x*head[4];
for (col = cend-head[4];
col < raw_width &&
col < cend && col < head[0]+head[2]-head[4]; col++) {
c = nc > 2 ? FC(row-top_margin,col-left_margin) : 0;
if (!(c & 1)) {
c = RAW(row,col) * mult[c];
RAW(row,col) = LIM(c,0,65535);
}
for (c=0; c < nc; c+=2)
mult[c] += mult[c+1];
}
}
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2)
mrow[c*wide+x] += mrow[(c+1)*wide+x];
}
}
free (mrow);
}
void CLASS phase_one_correct()
{
unsigned entries, tag, data, save, col, row, type;
int len, i, j, k, cip, val[4], dev[4], sum, max;
int head[9], diff, mindiff=INT_MAX, off_412=0;
static const signed char dir[12][2] =
{ {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0},
{-2,-2}, {-2,2}, {2,-2}, {2,2} };
float poly[8], num, cfrac, frac, mult[2], *yval[2];
ushort *xval[2];
int qmult_applied = 0, qlin_applied = 0;
if (half_size || !meta_length) return;
if (verbose) fprintf (stderr,_("Phase One correction...\n"));
fseek (ifp, meta_offset, SEEK_SET);
order = get2();
fseek (ifp, 6, SEEK_CUR);
fseek (ifp, meta_offset+get4(), SEEK_SET);
entries = get4(); get4();
while (entries--) {
tag = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, meta_offset+data, SEEK_SET);
if (tag == 0x419) { /* Polynomial curve */
for (get4(), i=0; i < 8; i++)
poly[i] = getreal(11);
poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1;
for (i=0; i < 0x10000; i++) {
num = (poly[5]*i + poly[3])*i + poly[1];
curve[i] = LIM(num,0,65535);
} goto apply; /* apply to right half */
} else if (tag == 0x41a) { /* Polynomial curve */
for (i=0; i < 4; i++)
poly[i] = getreal(11);
for (i=0; i < 0x10000; i++) {
for (num=0, j=4; j--; )
num = num * i + poly[j];
curve[i] = LIM(num+i,0,65535);
} apply: /* apply to whole image */
for (row=0; row < raw_height; row++)
for (col = (tag & 1)*ph1.split_col; col < raw_width; col++)
RAW(row,col) = curve[RAW(row,col)];
} else if (tag == 0x400) { /* Sensor defects */
while ((len -= 8) >= 0) {
col = get2();
row = get2();
type = get2(); get2();
if (col >= raw_width) continue;
if (type == 131 || type == 137) /* Bad column */
for (row=0; row < raw_height; row++)
if (FC(row-top_margin,col-left_margin) == 1) {
for (sum=i=0; i < 4; i++)
sum += val[i] = raw (row+dir[i][0], col+dir[i][1]);
for (max=i=0; i < 4; i++) {
dev[i] = abs((val[i] << 2) - sum);
if (dev[max] < dev[i]) max = i;
}
RAW(row,col) = (sum - val[max])/3.0 + 0.5;
} else {
for (sum=0, i=8; i < 12; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = 0.5 + sum * 0.0732233 +
(raw(row,col-2) + raw(row,col+2)) * 0.3535534;
}
else if (type == 129) { /* Bad pixel */
if (row >= raw_height) continue;
j = (FC(row-top_margin,col-left_margin) != 1) * 4;
for (sum=0, i=j; i < j+8; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = (sum + 4) >> 3;
}
}
} else if (tag == 0x401) { /* All-color flat fields */
phase_one_flat_field (1, 2);
} else if (tag == 0x416 || tag == 0x410) {
phase_one_flat_field (0, 2);
} else if (tag == 0x40b) { /* Red+blue flat field */
phase_one_flat_field (0, 4);
} else if (tag == 0x412) {
fseek (ifp, 36, SEEK_CUR);
diff = abs (get2() - ph1.tag_21a);
if (mindiff > diff) {
mindiff = diff;
off_412 = ftell(ifp) - 38;
}
} else if (tag == 0x41f && !qlin_applied) { /* Quadrant linearization */
ushort lc[2][2][16], ref[16];
int qr, qc;
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
for (i = 0; i < 16; i++)
lc[qr][qc][i] = get4();
for (i = 0; i < 16; i++) {
int v = 0;
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
v += lc[qr][qc][i];
ref[i] = (v + 2) >> 2;
}
for (qr = 0; qr < 2; qr++) {
for (qc = 0; qc < 2; qc++) {
int cx[19], cf[19];
for (i = 0; i < 16; i++) {
cx[1+i] = lc[qr][qc][i];
cf[1+i] = ref[i];
}
cx[0] = cf[0] = 0;
cx[17] = cf[17] = ((unsigned) ref[15] * 65535) / lc[qr][qc][15];
cx[18] = cf[18] = 65535;
cubic_spline(cx, cf, 19);
for (row = (qr ? ph1.split_row : 0);
row < (qr ? raw_height : ph1.split_row); row++)
for (col = (qc ? ph1.split_col : 0);
col < (qc ? raw_width : ph1.split_col); col++)
RAW(row,col) = curve[RAW(row,col)];
}
}
qlin_applied = 1;
} else if (tag == 0x41e && !qmult_applied) { /* Quadrant multipliers */
float qmult[2][2] = { { 1, 1 }, { 1, 1 } };
get4(); get4(); get4(); get4();
qmult[0][0] = 1.0 + getreal(11);
get4(); get4(); get4(); get4(); get4();
qmult[0][1] = 1.0 + getreal(11);
get4(); get4(); get4();
qmult[1][0] = 1.0 + getreal(11);
get4(); get4(); get4();
qmult[1][1] = 1.0 + getreal(11);
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width; col++) {
i = qmult[row >= ph1.split_row][col >= ph1.split_col] * RAW(row,col);
RAW(row,col) = LIM(i,0,65535);
}
qmult_applied = 1;
} else if (tag == 0x431 && !qmult_applied) { /* Quadrant combined */
ushort lc[2][2][7], ref[7];
int qr, qc;
for (i = 0; i < 7; i++)
ref[i] = get4();
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
for (i = 0; i < 7; i++)
lc[qr][qc][i] = get4();
for (qr = 0; qr < 2; qr++) {
for (qc = 0; qc < 2; qc++) {
int cx[9], cf[9];
for (i = 0; i < 7; i++) {
cx[1+i] = ref[i];
cf[1+i] = ((unsigned) ref[i] * lc[qr][qc][i]) / 10000;
}
cx[0] = cf[0] = 0;
cx[8] = cf[8] = 65535;
cubic_spline(cx, cf, 9);
for (row = (qr ? ph1.split_row : 0);
row < (qr ? raw_height : ph1.split_row); row++)
for (col = (qc ? ph1.split_col : 0);
col < (qc ? raw_width : ph1.split_col); col++)
RAW(row,col) = curve[RAW(row,col)];
}
}
qmult_applied = 1;
qlin_applied = 1;
}
fseek (ifp, save, SEEK_SET);
}
if (off_412) {
fseek (ifp, off_412, SEEK_SET);
for (i=0; i < 9; i++) head[i] = get4() & 0x7fff;
yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6);
merror (yval[0], "phase_one_correct()");
yval[1] = (float *) (yval[0] + head[1]*head[3]);
xval[0] = (ushort *) (yval[1] + head[2]*head[4]);
xval[1] = (ushort *) (xval[0] + head[1]*head[3]);
get2();
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
yval[i][j] = getreal(11);
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
xval[i][j] = get2();
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width; col++) {
cfrac = (float) col * head[3] / raw_width;
cfrac -= cip = cfrac;
num = RAW(row,col) * 0.5;
for (i=cip; i < cip+2; i++) {
for (k=j=0; j < head[1]; j++)
if (num < xval[0][k = head[1]*i+j]) break;
frac = (j == 0 || j == head[1]) ? 0 :
(xval[0][k] - num) / (xval[0][k] - xval[0][k-1]);
mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac);
}
i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2;
RAW(row,col) = LIM(i,0,65535);
}
free (yval[0]);
}
}
void CLASS phase_one_load_raw()
{
int a, b, i;
ushort akey, bkey, mask;
fseek (ifp, ph1.key_off, SEEK_SET);
akey = get2();
bkey = get2();
mask = ph1.format == 1 ? 0x5555:0x1354;
fseek (ifp, data_offset, SEEK_SET);
read_shorts (raw_image, raw_width*raw_height);
if (ph1.format)
for (i=0; i < raw_width*raw_height; i+=2) {
a = raw_image[i+0] ^ akey;
b = raw_image[i+1] ^ bkey;
raw_image[i+0] = (a & mask) | (b & ~mask);
raw_image[i+1] = (b & mask) | (a & ~mask);
}
}
unsigned CLASS ph1_bithuff (int nbits, ushort *huff)
{
static UINT64 bitbuf=0;
static int vbits=0;
unsigned c;
if (nbits == -1)
return bitbuf = vbits = 0;
if (nbits == 0) return 0;
if (vbits < nbits) {
bitbuf = bitbuf << 32 | get4();
vbits += 32;
}
c = bitbuf << (64-vbits) >> (64-nbits);
if (huff) {
vbits -= huff[c] >> 8;
return (uchar) huff[c];
}
vbits -= nbits;
return c;
}
#define ph1_bits(n) ph1_bithuff(n,0)
#define ph1_huff(h) ph1_bithuff(*h,h+1)
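/*
ph1_bithuff() is a 64-bit variant of getbithuff(): it refills the
buffer 32 bits at a time via get4() and, unlike the Canon path,
needs no 0xff-escape handling.
*/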
void CLASS phase_one_load_raw_c()
{
static const int length[] = { 8,7,6,9,11,10,5,12,14,13 };
int *offset, len[2], pred[2], row, col, i, j;
ushort *pixel;
short (*cblack)[2], (*rblack)[2];
pixel = (ushort *) calloc (raw_width*3 + raw_height*4, 2);
merror (pixel, "phase_one_load_raw_c()");
offset = (int *) (pixel + raw_width);
fseek (ifp, strip_offset, SEEK_SET);
for (row=0; row < raw_height; row++)
offset[row] = get4();
cblack = (short (*)[2]) (offset + raw_height);
fseek (ifp, ph1.black_col, SEEK_SET);
if (ph1.black_col)
read_shorts ((ushort *) cblack[0], raw_height*2);
rblack = cblack + raw_height;
fseek (ifp, ph1.black_row, SEEK_SET);
if (ph1.black_row)
read_shorts ((ushort *) rblack[0], raw_width*2);
for (i=0; i < 256; i++)
curve[i] = i*i / 3.969 + 0.5;
for (row=0; row < raw_height; row++) {
fseek (ifp, data_offset + offset[row], SEEK_SET);
ph1_bits(-1);
pred[0] = pred[1] = 0;
for (col=0; col < raw_width; col++) {
if (col >= (raw_width & -8))
len[0] = len[1] = 14;
else if ((col & 7) == 0)
for (i=0; i < 2; i++) {
for (j=0; j < 5 && !ph1_bits(1); j++);
if (j--) len[i] = length[j*2 + ph1_bits(1)];
}
if ((i = len[col & 1]) == 14)
pixel[col] = pred[col & 1] = ph1_bits(16);
else
pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1));
if (pred[col & 1] >> 16) derror();
if (ph1.format == 5 && pixel[col] < 256)
pixel[col] = curve[pixel[col]];
}
for (col=0; col < raw_width; col++) {
i = (pixel[col] << 2*(ph1.format != 8)) - ph1.black
+ cblack[row][col >= ph1.split_col]
+ rblack[col][row >= ph1.split_row];
if (i > 0) RAW(row,col) = i;
}
}
free (pixel);
maximum = 0xfffc - ph1.black;
}
void CLASS hasselblad_load_raw()
{
struct jhead jh;
int shot, row, col, *back[5], len[2], diff[12], pred, sh, f, s, c;
unsigned upix, urow, ucol;
ushort *ip;
if (!ljpeg_start (&jh, 0)) return;
order = 0x4949;
ph1_bits(-1);
back[4] = (int *) calloc (raw_width, 3*sizeof **back);
merror (back[4], "hasselblad_load_raw()");
FORC3 back[c] = back[4] + c*raw_width;
cblack[6] >>= sh = tiff_samples > 1;
shot = LIM(shot_select, 1, tiff_samples) - 1;
for (row=0; row < raw_height; row++) {
FORC4 back[(c+3) & 3] = back[c];
for (col=0; col < raw_width; col+=2) {
for (s=0; s < tiff_samples*2; s+=2) {
FORC(2) len[c] = ph1_huff(jh.huff[0]);
FORC(2) {
diff[s+c] = ph1_bits(len[c]);
if ((diff[s+c] & (1 << (len[c]-1))) == 0)
diff[s+c] -= (1 << len[c]) - 1;
if (diff[s+c] == 65535) diff[s+c] = -32768;
}
}
for (s=col; s < col+2; s++) {
pred = 0x8000 + load_flags;
if (col) pred = back[2][s-2];
if (col && row > 1) switch (jh.psv) {
case 11: pred += back[0][s]/2 - back[0][s-2]/2; break;
}
f = (row & 1)*3 ^ ((col+s) & 1);
FORC (tiff_samples) {
pred += diff[(s & 1)*tiff_samples+c];
upix = pred >> sh & 0xffff;
if (raw_image && c == shot)
RAW(row,s) = upix;
if (image) {
urow = row-top_margin + (c & 1);
ucol = col-left_margin - ((c >> 1) & 1);
ip = &image[urow*width+ucol][f];
if (urow < height && ucol < width)
*ip = c < 4 ? upix : (*ip + upix) >> 1;
}
}
back[2][s] = pred;
}
}
}
free (back[4]);
ljpeg_end (&jh);
if (image) mix_green = 1;
}
void CLASS leaf_hdr_load_raw()
{
ushort *pixel=0;
unsigned tile=0, r, c, row, col;
if (!filters) {
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "leaf_hdr_load_raw()");
}
FORC(tiff_samples)
for (r=0; r < raw_height; r++) {
if (r % tile_length == 0) {
fseek (ifp, data_offset + 4*tile++, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
}
if (filters && c != shot_select) continue;
if (filters) pixel = raw_image + r*raw_width;
read_shorts (pixel, raw_width);
if (!filters && (row = r - top_margin) < height)
for (col=0; col < width; col++)
image[row*width+col][c] = pixel[col+left_margin];
}
if (!filters) {
maximum = 0xffff;
raw_color = 1;
free (pixel);
}
}
void CLASS unpacked_load_raw()
{
int row, col, bits=0;
while (1 << ++bits < maximum);
read_shorts (raw_image, raw_width*raw_height);
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width; col++)
if ((RAW(row,col) >>= load_flags) >> bits
&& (unsigned) (row-top_margin) < height
&& (unsigned) (col-left_margin) < width) derror();
}
void CLASS sinar_4shot_load_raw()
{
ushort *pixel;
unsigned shot, row, col, r, c;
if (raw_image) {
shot = LIM (shot_select, 1, 4) - 1;
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
unpacked_load_raw();
return;
}
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "sinar_4shot_load_raw()");
for (shot=0; shot < 4; shot++) {
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
for (row=0; row < raw_height; row++) {
read_shorts (pixel, raw_width);
if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue;
for (col=0; col < raw_width; col++) {
if ((c = col-left_margin - (shot & 1)) >= width) continue;
image[r*width+c][(row & 1)*3 ^ (~col & 1)] = pixel[col];
}
}
}
free (pixel);
mix_green = 1;
}
void CLASS imacon_full_load_raw()
{
int row, col;
if (!image) return;
for (row=0; row < height; row++)
for (col=0; col < width; col++)
read_shorts (image[row*width+col], 3);
}
void CLASS packed_load_raw()
{
int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i;
UINT64 bitbuf=0;
bwide = raw_width * tiff_bps / 8;
bwide += bwide & load_flags >> 9;
rbits = bwide * 8 - raw_width * tiff_bps;
if (load_flags & 1) bwide = bwide * 16 / 15;
bite = 8 + (load_flags & 56);
half = (raw_height+1) >> 1;
for (irow=0; irow < raw_height; irow++) {
row = irow;
if (load_flags & 2 &&
(row = irow % half * 2 + irow / half) == 1 &&
load_flags & 4) {
if (vbits=0, tiff_compress)
fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET);
else {
fseek (ifp, 0, SEEK_END);
fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET);
}
}
for (col=0; col < raw_width; col++) {
for (vbits -= tiff_bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= ((UINT64) fgetc(ifp) << i);
}
val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps);
RAW(row,col ^ (load_flags >> 6 & 3)) = val;
if (load_flags & 1 && (col % 10) == 9 && fgetc(ifp) &&
row < height+top_margin && col < width+left_margin) derror();
}
vbits -= rbits;
}
}
void CLASS nokia_load_raw()
{
uchar *data, *dp;
int rev, dwide, row, col, c;
double sum[]={0,0};
rev = 3 * (order == 0x4949);
dwide = (raw_width * 5 + 1) / 4;
data = (uchar *) malloc (dwide*2);
merror (data, "nokia_load_raw()");
for (row=0; row < raw_height; row++) {
if (fread (data+dwide, 1, dwide, ifp) < dwide) derror();
FORC(dwide) data[c] = data[dwide+(c ^ rev)];
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
free (data);
maximum = 0x3ff;
if (strcmp(make,"OmniVision")) return;
row = raw_height/2;
FORC(width-1) {
sum[ c & 1] += SQR(RAW(row,c)-RAW(row+1,c+1));
sum[~c & 1] += SQR(RAW(row+1,c)-RAW(row,c+1));
}
if (sum[1] > sum[0]) filters = 0x4b4b4b4b;
}
void CLASS canon_rmf_load_raw()
{
int row, col, bits, orow, ocol, c;
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width-2; col+=3) {
bits = get4();
FORC3 {
orow = row;
if ((ocol = col+c-4) < 0) {
ocol += raw_width;
if ((orow -= 2) < 0)
orow += raw_height;
}
RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff];
}
}
maximum = curve[0x3ff];
}
unsigned CLASS pana_bits (int nbits)
{
static uchar buf[0x4000];
static int vbits;
int byte;
if (!nbits) return vbits=0;
if (!vbits) {
fread (buf+load_flags, 1, 0x4000-load_flags, ifp);
fread (buf, 1, load_flags, ifp);
}
vbits = (vbits - nbits) & 0x1ffff;
byte = vbits >> 3 ^ 0x3ff0;
return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~(-1 << nbits);
}
void CLASS panasonic_load_raw()
{
int row, col, i, j, sh=0, pred[2], nonz[2];
pana_bits(0);
for (row=0; row < height; row++)
for (col=0; col < raw_width; col++) {
if ((i = col % 14) == 0)
pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2));
if (nonz[i & 1]) {
if ((j = pana_bits(8))) {
if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
pred[i & 1] &= ~(-1 << sh);
pred[i & 1] += j << sh;
}
} else if ((nonz[i & 1] = pana_bits(8)) || i > 11)
pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4);
if ((RAW(row,col) = pred[col & 1]) > 4098 && col < width) derror();
}
}
void CLASS olympus_load_raw()
{
ushort huff[4096];
int row, col, nbits, sign, low, high, i, c, w, n, nw;
int acarry[2][3], *carry, pred, diff;
huff[n=0] = 0xc0c;
for (i=12; i--; )
FORC(2048 >> i) huff[++n] = (i+1) << 8 | i;
fseek (ifp, 7, SEEK_CUR);
getbits(-1);
for (row=0; row < height; row++) {
memset (acarry, 0, sizeof acarry);
for (col=0; col < raw_width; col++) {
carry = acarry[col & 1];
i = 2 * (carry[2] < 3);
for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++);
low = (sign = getbits(3)) & 3;
sign = sign << 29 >> 31;
if ((high = getbithuff(12,huff)) == 12)
high = getbits(16-nbits) >> 1;
carry[0] = (high << nbits) | getbits(nbits);
diff = (carry[0] ^ sign) + carry[1];
carry[1] = (diff*3 + carry[1]) >> 5;
carry[2] = carry[0] > 16 ? 0 : carry[2]+1;
if (col >= width) continue;
if (row < 2 && col < 2) pred = 0;
else if (row < 2) pred = RAW(row,col-2);
else if (col < 2) pred = RAW(row-2,col);
else {
w = RAW(row,col-2);
n = RAW(row-2,col);
nw = RAW(row-2,col-2);
if ((w < nw && nw < n) || (n < nw && nw < w)) {
if (ABS(w-nw) > 32 || ABS(n-nw) > 32)
pred = w + n - nw;
else pred = (w + n) >> 1;
} else pred = ABS(w-nw) > ABS(n-nw) ? w : n;
}
if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror();
}
}
}
void CLASS canon_crx_load_raw()
{
}
#include "fuji_compressed.h"
void CLASS minolta_rd175_load_raw()
{
uchar pixel[768];
unsigned irow, box, row, col;
for (irow=0; irow < 1481; irow++) {
if (fread (pixel, 1, 768, ifp) < 768) derror();
box = irow / 82;
row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2);
switch (irow) {
case 1477: case 1479: continue;
case 1476: row = 984; break;
case 1480: row = 985; break;
case 1478: row = 985; box = 1;
}
if ((box < 12) && (box & 1)) {
for (col=0; col < 1533; col++, row ^= 1)
if (col != 1) RAW(row,col) = (col+1) & 2 ?
pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1;
RAW(row,1) = pixel[1] << 1;
RAW(row,1533) = pixel[765] << 1;
} else
for (col=row & 1; col < 1534; col+=2)
RAW(row,col) = pixel[col/2] << 1;
}
maximum = 0xff << 1;
}
void CLASS quicktake_100_load_raw()
{
uchar pixel[484][644];
static const short gstep[16] =
{ -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 };
static const short rstep[6][4] =
{ { -3,-1,1,3 }, { -5,-1,1,5 }, { -8,-2,2,8 },
{ -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } };
static const short curve[256] =
{ 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,
28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,
54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,
79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116,
118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155,
158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195,
197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244,
248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322,
326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400,
405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479,
483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643,
654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844,
855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 };
int rb, row, col, sharp, val=0;
getbits(-1);
memset (pixel, 0x80, sizeof pixel);
for (row=2; row < height+2; row++) {
for (col=2+(row & 1); col < width+2; col+=2) {
val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] +
pixel[row][col-2]) >> 2) + gstep[getbits(4)];
pixel[row][col] = val = LIM(val,0,255);
if (col < 4)
pixel[row][col-2] = pixel[row+1][~row & 1] = val;
if (row == 2)
pixel[row-1][col+1] = pixel[row-1][col+3] = val;
}
pixel[row][col] = val;
}
for (rb=0; rb < 2; rb++)
for (row=2+rb; row < height+2; row+=2)
for (col=3-(row & 1); col < width+2; col+=2) {
if (row < 4 || col < 4) sharp = 2;
else {
val = ABS(pixel[row-2][col] - pixel[row][col-2])
+ ABS(pixel[row-2][col] - pixel[row-2][col-2])
+ ABS(pixel[row][col-2] - pixel[row-2][col-2]);
sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 :
val < 32 ? 3 : val < 48 ? 4 : 5;
}
val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1)
+ rstep[sharp][getbits(2)];
pixel[row][col] = val = LIM(val,0,255);
if (row < 4) pixel[row-2][col+2] = val;
if (col < 4) pixel[row+2][col-2] = val;
}
for (row=2; row < height+2; row++)
for (col=3-(row & 1); col < width+2; col+=2) {
val = ((pixel[row][col-1] + (pixel[row][col] << 2) +
pixel[row][col+1]) >> 1) - 0x100;
pixel[row][col] = LIM(val,0,255);
}
for (row=0; row < height; row++)
for (col=0; col < width; col++)
RAW(row,col) = curve[pixel[row+2][col+2]];
maximum = 0x3ff;
}
#define radc_token(tree) ((signed char) getbithuff(8,huff[tree]))
#define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--)
#define PREDICTOR (c ? (buf[c][y-1][x] + buf[c][y][x+1]) / 2 \
: (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4)
void CLASS kodak_radc_load_raw()
{
static const char src[] = {
1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8,
1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8,
2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8,
2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8,
2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8,
2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8,
2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8,
2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8,
2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4,
2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8,
1,0, 2,2, 2,-2,
1,-3, 1,3,
2,-17, 2,-5, 2,5, 2,17,
2,-7, 2,2, 2,9, 2,18,
2,-18, 2,-9, 2,-2, 2,7,
2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79,
2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76,
2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37
};
ushort huff[19][256];
int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
short last[3] = { 16,16,16 }, mul[3], buf[3][3][386];
static const ushort pt[] =
{ 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 };
for (i=2; i < 12; i+=2)
for (c=pt[i-2]; c <= pt[i]; c++)
curve[c] = (float)
(c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5;
for (s=i=0; i < sizeof src; i+=2)
FORC(256 >> src[i])
((ushort *)huff)[s++] = src[i] << 8 | (uchar) src[i+1];
s = kodak_cbpp == 243 ? 2 : 3;
FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1);
getbits(-1);
for (i=0; i < sizeof(buf)/sizeof(short); i++)
((short *)buf)[i] = 2048;
for (row=0; row < height; row+=4) {
FORC3 mul[c] = getbits(6);
FORC3 {
val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c];
s = val > 65564 ? 10:12;
x = ~(-1 << (s-1));
val <<= 12-s;
for (i=0; i < sizeof(buf[0])/sizeof(short); i++)
((short *)buf[c])[i] = (((short *)buf[c])[i] * val + x) >> s;
last[c] = mul[c];
for (r=0; r <= !c; r++) {
buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7;
for (tree=1, col=width/2; col > 0; ) {
if ((tree = radc_token(tree))) {
col -= 2;
if (tree == 8)
FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c];
else
FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR;
} else
do {
nreps = (col > 2) ? radc_token(9) + 1 : 1;
for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) {
col -= 2;
FORYX buf[c][y][x] = PREDICTOR;
if (rep & 1) {
step = radc_token(10) << 4;
FORYX buf[c][y][x] += step;
}
}
} while (nreps == 9);
}
for (y=0; y < 2; y++)
for (x=0; x < width/2; x++) {
val = (buf[c][y+1][x] << 4) / mul[c];
if (val < 0) val = 0;
if (c) RAW(row+y*2+c-1,x*2+2-c) = val;
else RAW(row+r*2+y,x*2+y) = val;
}
memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c);
}
}
for (y=row; y < row+4; y++)
for (x=0; x < width; x++)
if ((x+y) & 1) {
r = x ? x-1 : x+1;
s = x+1 < width ? x+1 : x-1;
val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2;
if (val < 0) val = 0;
RAW(y,x) = val;
}
}
for (i=0; i < height*width; i++)
raw_image[i] = curve[raw_image[i]];
maximum = 0x3fff;
}
#undef FORYX
#undef PREDICTOR
#ifdef NO_JPEG
void CLASS kodak_jpeg_load_raw() {}
void CLASS lossy_dng_load_raw() {}
#else
METHODDEF(boolean)
fill_input_buffer (j_decompress_ptr cinfo)
{
static uchar jpeg_buffer[4096];
size_t nbytes;
nbytes = fread (jpeg_buffer, 1, 4096, ifp);
swab (jpeg_buffer, jpeg_buffer, nbytes);
cinfo->src->next_input_byte = jpeg_buffer;
cinfo->src->bytes_in_buffer = nbytes;
return TRUE;
}
void CLASS kodak_jpeg_load_raw()
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
int row, col;
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
jpeg_stdio_src (&cinfo, ifp);
cinfo.src->fill_input_buffer = fill_input_buffer;
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
if ((cinfo.output_width != width ) ||
(cinfo.output_height*2 != height ) ||
(cinfo.output_components != 3 )) {
fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname);
jpeg_destroy_decompress (&cinfo);
longjmp (failure, 3);
}
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1);
while (cinfo.output_scanline < cinfo.output_height) {
row = cinfo.output_scanline * 2;
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < width; col+=2) {
RAW(row+0,col+0) = pixel[col+0][1] << 1;
RAW(row+1,col+1) = pixel[col+1][1] << 1;
RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
}
}
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
maximum = 0xff << 1;
}
void CLASS gamma_curve (double pwr, double ts, int mode, int imax);
void CLASS lossy_dng_load_raw()
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
unsigned sorder=order, ntags, opcode, deg, i, j, c;
unsigned save=data_offset-4, trow=0, tcol=0, row, col;
ushort cur[3][256];
double coeff[9], tot;
if (meta_offset) {
fseek (ifp, meta_offset, SEEK_SET);
order = 0x4d4d;
ntags = get4();
while (ntags--) {
opcode = get4(); get4(); get4();
if (opcode != 8)
{ fseek (ifp, get4(), SEEK_CUR); continue; }
fseek (ifp, 20, SEEK_CUR);
if ((c = get4()) > 2) break;
fseek (ifp, 12, SEEK_CUR);
if ((deg = get4()) > 8) break;
for (i=0; i <= deg && i < 9; i++)
coeff[i] = getreal(12);
for (i=0; i < 256; i++) {
for (tot=j=0; j <= deg; j++)
tot += coeff[j] * pow(i/255.0, j);
cur[c][i] = tot*0xffff;
}
}
order = sorder;
} else {
gamma_curve (1/2.4, 12.92, 1, 255);
FORC3 memcpy (cur[c], curve, sizeof cur[0]);
}
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
while (trow < raw_height) {
fseek (ifp, save+=4, SEEK_SET);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
jpeg_stdio_src (&cinfo, ifp);
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1);
while (cinfo.output_scanline < cinfo.output_height &&
(row = trow + cinfo.output_scanline) < height) {
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < cinfo.output_width && tcol+col < width; col++) {
FORC3 image[row*width+tcol+col][c] = cur[c][pixel[col][c]];
}
}
jpeg_abort_decompress (&cinfo);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
}
jpeg_destroy_decompress (&cinfo);
maximum = 0xffff;
}
#endif
void CLASS kodak_dc120_load_raw()
{
static const int mul[4] = { 162, 192, 187, 92 };
static const int add[4] = { 0, 636, 424, 212 };
uchar pixel[848];
int row, shift, col;
for (row=0; row < height; row++) {
if (fread (pixel, 1, 848, ifp) < 848) derror();
shift = row * mul[row & 3] + add[row & 3];
for (col=0; col < width; col++)
RAW(row,col) = (ushort) pixel[(col + shift) % 848];
}
maximum = 0xff;
}
void CLASS eight_bit_load_raw()
{
uchar *pixel;
unsigned row, col;
pixel = (uchar *) calloc (raw_width, sizeof *pixel);
merror (pixel, "eight_bit_load_raw()");
for (row=0; row < raw_height; row++) {
if (fread (pixel, 1, raw_width, ifp) < raw_width) derror();
for (col=0; col < raw_width; col++)
RAW(row,col) = curve[pixel[col]];
}
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_c330_load_raw()
{
uchar *pixel;
int row, col, y, cb, cr, rgb[3], c;
pixel = (uchar *) calloc (raw_width, 2*sizeof *pixel);
merror (pixel, "kodak_c330_load_raw()");
for (row=0; row < height; row++) {
if (fread (pixel, raw_width, 2, ifp) < 2) derror();
if (load_flags && (row & 31) == 31)
fseek (ifp, raw_width*32, SEEK_CUR);
for (col=0; col < width; col++) {
y = pixel[col*2];
cb = pixel[(col*2 & -4) | 1] - 128;
cr = pixel[(col*2 & -4) | 3] - 128;
rgb[1] = y - ((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
}
}
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_c603_load_raw()
{
uchar *pixel;
int row, col, y, cb, cr, rgb[3], c;
pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel);
merror (pixel, "kodak_c603_load_raw()");
for (row=0; row < height; row++) {
if (~row & 1)
if (fread (pixel, raw_width, 3, ifp) < 3) derror();
for (col=0; col < width; col++) {
y = pixel[width*2*(row & 1) + col];
cb = pixel[width + (col & -2)] - 128;
cr = pixel[width + (col & -2)+1] - 128;
rgb[1] = y - ((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
}
}
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_262_load_raw()
{
static const uchar kodak_tree[2][26] =
{ { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 },
{ 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } };
ushort *huff[2];
uchar *pixel;
int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val;
FORC(2) huff[c] = make_decoder (kodak_tree[c]);
ns = (raw_height+63) >> 5;
pixel = (uchar *) malloc (raw_width*32 + ns*4);
merror (pixel, "kodak_262_load_raw()");
strip = (int *) (pixel + raw_width*32);
order = 0x4d4d;
FORC(ns) strip[c] = get4();
for (row=0; row < raw_height; row++) {
if ((row & 31) == 0) {
fseek (ifp, strip[row >> 5], SEEK_SET);
getbits(-1);
pi = 0;
}
for (col=0; col < raw_width; col++) {
chess = (row + col) & 1;
pi1 = chess ? pi-2 : pi-raw_width-1;
pi2 = chess ? pi-2*raw_width : pi-raw_width+1;
if (col <= chess) pi1 = -1;
if (pi1 < 0) pi1 = pi2;
if (pi2 < 0) pi2 = pi1;
if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2;
pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1;
pixel[pi] = val = pred + ljpeg_diff (huff[chess]);
if (val >> 8) derror();
val = curve[pixel[pi++]];
RAW(row,col) = val;
}
}
free (pixel);
FORC(2) free (huff[c]);
}
int CLASS kodak_65000_decode (short *out, int bsize)
{
uchar c, blen[768];
ushort raw[6];
INT64 bitbuf=0;
int save, bits=0, i, j, len, diff;
save = ftell(ifp);
bsize = (bsize + 3) & -4;
for (i=0; i < bsize; i+=2) {
c = fgetc(ifp);
if ((blen[i ] = c & 15) > 12 ||
(blen[i+1] = c >> 4) > 12 ) {
fseek (ifp, save, SEEK_SET);
for (i=0; i < bsize; i+=8) {
read_shorts (raw, 6);
out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12;
out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12;
for (j=0; j < 6; j++)
out[i+2+j] = raw[j] & 0xfff;
}
return 1;
}
}
if ((bsize & 7) == 4) {
bitbuf = fgetc(ifp) << 8;
bitbuf += fgetc(ifp);
bits = 16;
}
for (i=0; i < bsize; i++) {
len = blen[i];
if (bits < len) {
for (j=0; j < 32; j+=8)
bitbuf += (INT64) fgetc(ifp) << (bits+(j^8));
bits += 32;
}
diff = bitbuf & (0xffff >> (16-len));
bitbuf >>= len;
bits -= len;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
out[i] = diff;
}
return 0;
}
void CLASS kodak_65000_load_raw()
{
short buf[256];
int row, col, len, pred[2], ret, i;
for (row=0; row < height; row++)
for (col=0; col < width; col+=256) {
pred[0] = pred[1] = 0;
len = MIN (256, width-col);
ret = kodak_65000_decode (buf, len);
for (i=0; i < len; i++)
if ((RAW(row,col+i) = curve[ret ? buf[i] :
(pred[i & 1] += buf[i])]) >> 12) derror();
}
}
void CLASS kodak_ycbcr_load_raw()
{
short buf[384], *bp;
int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3];
ushort *ip;
if (!image) return;
for (row=0; row < height; row+=2)
for (col=0; col < width; col+=128) {
len = MIN (128, width-col);
kodak_65000_decode (buf, len*3);
y[0][1] = y[1][1] = cb = cr = 0;
for (bp=buf, i=0; i < len; i+=2, bp+=2) {
cb += bp[4];
cr += bp[5];
rgb[1] = -((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
for (j=0; j < 2; j++)
for (k=0; k < 2; k++) {
if ((y[j][k] = y[j][k^1] + *bp++) >> 10) derror();
ip = image[(row+j)*width + col+i+k];
FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)];
}
}
}
}
void CLASS kodak_rgb_load_raw()
{
short buf[768], *bp;
int row, col, len, c, i, rgb[3];
ushort *ip=image[0];
for (row=0; row < height; row++)
for (col=0; col < width; col+=256) {
len = MIN (256, width-col);
kodak_65000_decode (buf, len*3);
memset (rgb, 0, sizeof rgb);
for (bp=buf, i=0; i < len; i++, ip+=4)
FORC3 if ((ip[c] = rgb[c] += *bp++) >> 12) derror();
}
}
void CLASS kodak_thumb_load_raw()
{
int row, col;
colors = thumb_misc >> 5;
for (row=0; row < height; row++)
for (col=0; col < width; col++)
read_shorts (image[row*width+col], colors);
maximum = (1 << (thumb_misc & 31)) - 1;
}
void CLASS sony_decrypt (unsigned *data, int len, int start, int key)
{
static unsigned pad[128], p;
if (start) {
for (p=0; p < 4; p++)
pad[p] = key = key * 48828125 + 1;
pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31;
for (p=4; p < 127; p++)
pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31;
for (p=0; p < 127; p++)
pad[p] = htonl(pad[p]);
}
while (len-- && p++)
*data++ ^= pad[(p-1) & 127] = pad[p & 127] ^ pad[(p+64) & 127];
}
void CLASS sony_load_raw()
{
uchar head[40];
ushort *pixel;
unsigned i, key, row, col;
fseek (ifp, 200896, SEEK_SET);
fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR);
order = 0x4d4d;
key = get4();
fseek (ifp, 164600, SEEK_SET);
fread (head, 1, 40, ifp);
sony_decrypt ((unsigned *) head, 10, 1, key);
for (i=26; i-- > 22; )
key = key << 8 | head[i];
fseek (ifp, data_offset, SEEK_SET);
for (row=0; row < raw_height; row++) {
pixel = raw_image + row*raw_width;
if (fread (pixel, 2, raw_width, ifp) < raw_width) derror();
sony_decrypt ((unsigned *) pixel, raw_width/2, !row, key);
for (col=0; col < raw_width; col++)
if ((pixel[col] = ntohs(pixel[col])) >> 14) derror();
}
maximum = 0x3ff0;
}
void CLASS sony_arw_load_raw()
{
ushort huff[32770];
static const ushort tab[18] =
{ 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809,
0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 };
int i, c, n, col, row, sum=0;
huff[0] = 15;
for (n=i=0; i < 18; i++)
FORC(32768 >> (tab[i] >> 8)) huff[++n] = tab[i];
getbits(-1);
for (col = raw_width; col--; )
for (row=0; row < raw_height+1; row+=2) {
if (row == raw_height) row = 1;
if ((sum += ljpeg_diff(huff)) >> 12) derror();
if (row < height) RAW(row,col) = sum;
}
}
void CLASS sony_arw2_load_raw()
{
uchar *data, *dp;
ushort pix[16];
int row, col, val, max, min, imax, imin, sh, bit, i;
data = (uchar *) malloc (raw_width+1);
merror (data, "sony_arw2_load_raw()");
for (row=0; row < height; row++) {
fread (data, 1, raw_width, ifp);
for (dp=data, col=0; col < raw_width-30; dp+=16) {
max = 0x7ff & (val = sget4(dp));
min = 0x7ff & val >> 11;
imax = 0x0f & val >> 22;
imin = 0x0f & val >> 26;
for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++);
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1] >> 2;
col -= col & 1 ? 1:31;
}
}
free (data);
}
void CLASS samsung_load_raw()
{
int row, col, c, i, dir, op[4], len[4];
order = 0x4949;
for (row=0; row < raw_height; row++) {
fseek (ifp, strip_offset+row*4, SEEK_SET);
fseek (ifp, data_offset+get4(), SEEK_SET);
ph1_bits(-1);
FORC4 len[c] = row < 2 ? 7:4;
for (col=0; col < raw_width; col+=16) {
dir = ph1_bits(1);
FORC4 op[c] = ph1_bits(2);
FORC4 switch (op[c]) {
case 3: len[c] = ph1_bits(4); break;
case 2: len[c]--; break;
case 1: len[c]++;
}
for (c=0; c < 16; c+=2) {
i = len[((c & 1) << 1) | (c >> 3)];
RAW(row,col+c) = ((signed) ph1_bits(i) << (32-i) >> (32-i)) +
(dir ? RAW(row+(~c | -2),col+c) : col ? RAW(row,col+(c | -2)) : 128);
if (c == 14) c = -1;
}
}
}
for (row=0; row < raw_height-1; row+=2)
for (col=0; col < raw_width-1; col+=2)
SWAP (RAW(row,col+1), RAW(row+1,col));
}
void CLASS samsung2_load_raw()
{
static const ushort tab[14] =
{ 0x304,0x307,0x206,0x205,0x403,0x600,0x709,
0x80a,0x90b,0xa0c,0xa0d,0x501,0x408,0x402 };
ushort huff[1026], vpred[2][2] = {{0,0},{0,0}}, hpred[2];
int i, c, n, row, col, diff;
huff[0] = 10;
for (n=i=0; i < 14; i++)
FORC(1024 >> (tab[i] >> 8)) huff[++n] = tab[i];
getbits(-1);
for (row=0; row < raw_height; row++)
for (col=0; col < raw_width; col++) {
diff = ljpeg_diff (huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
RAW(row,col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps) derror();
}
}
void CLASS samsung3_load_raw()
{
int opt, init, mag, pmode, row, tab, col, pred, diff, i, c;
ushort lent[3][2], len[4], *prow[2];
order = 0x4949;
fseek (ifp, 9, SEEK_CUR);
opt = fgetc(ifp);
init = (get2(),get2());
for (row=0; row < raw_height; row++) {
fseek (ifp, (data_offset-ftell(ifp)) & 15, SEEK_CUR);
ph1_bits(-1);
mag = 0; pmode = 7;
FORC(6) ((ushort *)lent)[c] = row < 2 ? 7:4;
prow[ row & 1] = &RAW(row-1,1-((row & 1) << 1)); // green
prow[~row & 1] = &RAW(row-2,0); // red and blue
for (tab=0; tab+15 < raw_width; tab+=16) {
if (~opt & 4 && !(tab & 63)) {
i = ph1_bits(2);
mag = i < 3 ? mag-'2'+"204"[i] : ph1_bits(12);
}
if (opt & 2)
pmode = 7 - 4*ph1_bits(1);
else if (!ph1_bits(1))
pmode = ph1_bits(3);
if (opt & 1 || !ph1_bits(1)) {
FORC4 len[c] = ph1_bits(2);
FORC4 {
i = ((row & 1) << 1 | (c & 1)) % 3;
len[c] = len[c] < 3 ? lent[i][0]-'1'+"120"[len[c]] : ph1_bits(4);
lent[i][0] = lent[i][1];
lent[i][1] = len[c];
}
}
FORC(16) {
col = tab + (((c & 7) << 1)^(c >> 3)^(row & 1));
pred = (pmode == 7 || row < 2)
? (tab ? RAW(row,tab-2+(col & 1)) : init)
: (prow[col & 1][col-'4'+"0224468"[pmode]] +
prow[col & 1][col-'4'+"0244668"[pmode]] + 1) >> 1;
diff = ph1_bits (i = len[c >> 2]);
if (diff >> (i-1)) diff -= 1 << i;
diff = diff * (mag*2+1) + mag;
RAW(row,col) = pred + diff;
}
}
}
}
#define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1)
/* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */
void CLASS smal_decode_segment (unsigned seg[2][2], int holes)
{
uchar hist[3][13] = {
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 3, 3, 0, 0, 63, 47, 31, 15, 0 } };
int low, high=0xff, carry=0, nbits=8;
int pix, s, count, bin, next, i, sym[3];
uchar diff, pred[]={0,0};
ushort data=0, range=0;
fseek (ifp, seg[0][1]+1, SEEK_SET);
getbits(-1);
if (seg[1][0] > raw_width*raw_height)
seg[1][0] = raw_width*raw_height;
for (pix=seg[0][0]; pix < seg[1][0]; pix++) {
for (s=0; s < 3; s++) {
data = data << nbits | getbits(nbits);
if (carry < 0)
carry = (nbits += carry+1) < 1 ? nbits-1 : 0;
while (--nbits >= 0)
if ((data >> nbits & 0xff) == 0xff) break;
if (nbits > 0)
data = ((data & ((1 << (nbits-1)) - 1)) << 1) |
((data + (((data & (1 << (nbits-1)))) << 1)) & (-1 << nbits));
if (nbits >= 0) {
data += getbits(1);
carry = nbits - 8;
}
count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4);
for (bin=0; hist[s][bin+5] > count; bin++);
low = hist[s][bin+5] * (high >> 4) >> 2;
if (bin) high = hist[s][bin+4] * (high >> 4) >> 2;
high -= low;
for (nbits=0; high << nbits < 128; nbits++);
range = (range+low) << nbits;
high <<= nbits;
next = hist[s][1];
if (++hist[s][2] > hist[s][3]) {
next = (next+1) & hist[s][0];
hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2;
hist[s][2] = 1;
}
if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) {
if (bin < hist[s][1])
for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--;
else if (next <= bin)
for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++;
}
hist[s][1] = next;
sym[s] = bin;
}
diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3);
if (sym[0] & 4)
diff = diff ? -diff : 0x80;
if (ftell(ifp) + 12 >= seg[1][1])
diff = 0;
raw_image[pix] = pred[pix & 1] += diff;
if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2;
}
maximum = 0xff;
}
void CLASS smal_v6_load_raw()
{
unsigned seg[2][2];
fseek (ifp, 16, SEEK_SET);
seg[0][0] = 0;
seg[0][1] = get2();
seg[1][0] = raw_width * raw_height;
seg[1][1] = INT_MAX;
smal_decode_segment (seg, 0);
}
int CLASS median4 (int *p)
{
int min, max, sum, i;
min = max = sum = p[0];
for (i=1; i < 4; i++) {
sum += p[i];
if (min > p[i]) min = p[i];
if (max < p[i]) max = p[i];
}
return (sum - min - max) >> 1;
}
void CLASS fill_holes (int holes)
{
int row, col, val[4];
for (row=2; row < height-2; row++) {
if (!HOLE(row)) continue;
for (col=1; col < width-1; col+=4) {
val[0] = RAW(row-1,col-1);
val[1] = RAW(row-1,col+1);
val[2] = RAW(row+1,col-1);
val[3] = RAW(row+1,col+1);
RAW(row,col) = median4(val);
}
for (col=2; col < width-2; col+=4)
if (HOLE(row-2) || HOLE(row+2))
RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1;
else {
val[0] = RAW(row,col-2);
val[1] = RAW(row,col+2);
val[2] = RAW(row-2,col);
val[3] = RAW(row+2,col);
RAW(row,col) = median4(val);
}
}
}
void CLASS smal_v9_load_raw()
{
unsigned seg[256][2], offset, nseg, holes, i;
fseek (ifp, 67, SEEK_SET);
offset = get4();
nseg = (uchar) fgetc(ifp);
fseek (ifp, offset, SEEK_SET);
for (i=0; i < nseg*2; i++)
((unsigned *)seg)[i] = get4() + data_offset*(i & 1);
fseek (ifp, 78, SEEK_SET);
holes = fgetc(ifp);
fseek (ifp, 88, SEEK_SET);
seg[nseg][0] = raw_height * raw_width;
seg[nseg][1] = get4() + data_offset;
for (i=0; i < nseg; i++)
smal_decode_segment (seg+i, holes);
if (holes) fill_holes (holes);
}
void CLASS redcine_load_raw()
{
#ifndef NO_JASPER
int c, row, col;
jas_stream_t *in;
jas_image_t *jimg;
jas_matrix_t *jmat;
jas_seqent_t *data;
ushort *img, *pix;
jas_init();
in = jas_stream_fopen (ifname, "rb");
jas_stream_seek (in, data_offset+20, SEEK_SET);
jimg = jas_image_decode (in, -1, 0);
if (!jimg) longjmp (failure, 3);
jmat = jas_matrix_create (height/2, width/2);
merror (jmat, "redcine_load_raw()");
img = (ushort *) calloc ((height+2), (width+2)*2);
merror (img, "redcine_load_raw()");
FORC4 {
jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat);
data = jas_matrix_getref (jmat, 0, 0);
for (row = c >> 1; row < height; row+=2)
for (col = c & 1; col < width; col+=2)
img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2];
}
for (col=1; col <= width; col++) {
img[col] = img[2*(width+2)+col];
img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col];
}
for (row=0; row < height+2; row++) {
img[row*(width+2)] = img[row*(width+2)+2];
img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3];
}
for (row=1; row <= height; row++) {
pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1));
for ( ; col <= width; col+=2, pix+=2) {
c = (((pix[0] - 0x800) << 3) +
pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2;
pix[0] = LIM(c,0,4095);
}
}
for (row=0; row < height; row++)
for (col=0; col < width; col++)
RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]];
free (img);
jas_matrix_destroy (jmat);
jas_image_destroy (jimg);
jas_stream_close (in);
#endif
}
void CLASS crop_masked_pixels()
{
int row, col;
unsigned r, c, m, mblack[8], zero, val;
if (load_raw == &CLASS phase_one_load_raw ||
load_raw == &CLASS phase_one_load_raw_c)
phase_one_correct();
if (fuji_width) {
for (row=0; row < raw_height-top_margin*2; row++) {
for (col=0; col < fuji_width << !fuji_layout; col++) {
if (fuji_layout) {
r = fuji_width - 1 - col + (row >> 1);
c = col + ((row+1) >> 1);
} else {
r = fuji_width - 1 + row - (col >> 1);
c = row + ((col+1) >> 1);
}
if (r < height && c < width)
BAYER(r,c) = RAW(row+top_margin,col+left_margin);
}
}
} else {
for (row=0; row < height; row++)
for (col=0; col < width; col++)
BAYER2(row,col) = RAW(row+top_margin,col+left_margin);
}
if (mask[0][3] > 0) goto mask_set;
if (load_raw == &CLASS canon_load_raw ||
load_raw == &CLASS lossless_jpeg_load_raw) {
mask[0][1] = mask[1][1] += 2;
mask[0][3] -= 2;
goto sides;
}
if (load_raw == &CLASS canon_600_load_raw ||
load_raw == &CLASS sony_load_raw ||
(load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) ||
load_raw == &CLASS kodak_262_load_raw ||
(load_raw == &CLASS packed_load_raw && (load_flags & 256))) {
sides:
mask[0][0] = mask[1][0] = top_margin;
mask[0][2] = mask[1][2] = top_margin+height;
mask[0][3] += left_margin;
mask[1][1] += left_margin+width;
mask[1][3] += raw_width;
}
if (load_raw == &CLASS nokia_load_raw) {
mask[0][2] = top_margin;
mask[0][3] = width;
}
mask_set:
memset (mblack, 0, sizeof mblack);
for (zero=m=0; m < 8; m++)
for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++)
for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) {
c = FC(row-top_margin,col-left_margin);
mblack[c] += val = RAW(row,col);
mblack[4+c]++;
zero += !val;
}
if (load_raw == &CLASS canon_600_load_raw && width < raw_width) {
black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) /
(mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4;
canon_600_correct();
} else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7]) {
FORC4 cblack[c] = mblack[c] / mblack[4+c];
cblack[4] = cblack[5] = cblack[6] = 0;
}
}
void CLASS remove_zeroes()
{
unsigned row, col, tot, n, r, c;
for (row=0; row < height; row++)
for (col=0; col < width; col++)
if (BAYER(row,col) == 0) {
tot = n = 0;
for (r = row-2; r <= row+2; r++)
for (c = col-2; c <= col+2; c++)
if (r < height && c < width &&
FC(r,c) == FC(row,col) && BAYER(r,c))
tot += (n++,BAYER(r,c));
if (n) BAYER(row,col) = tot/n;
}
}
/*
   Search from the current directory up to the root looking for
a ".badpixels" file, and fix those pixels now.
*/
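/*
   Illustrative only: as parsed below, a ".badpixels" file is plain text with
   one "column row unix-time" triple per line; '#' starts a comment, and
   entries whose time is later than the image timestamp are skipped.  The
   coordinates and time shown here are hypothetical:

	# col  row  bad since (UNIX time)
	1762   683  1062544035
	 520  1990  0
 */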
void CLASS bad_pixels (const char *cfname)
{
FILE *fp=0;
char *fname, *cp, line[128];
int len, time, row, col, r, c, rad, tot, n, fixed=0;
if (!filters) return;
if (cfname)
fp = fopen (cfname, "r");
else {
for (len=32 ; ; len *= 2) {
fname = (char *) malloc (len);
if (!fname) return;
if (getcwd (fname, len-16)) break;
free (fname);
if (errno != ERANGE) return;
}
#if defined(WIN32) || defined(DJGPP)
if (fname[1] == ':')
memmove (fname, fname+2, len-2);
for (cp=fname; *cp; cp++)
if (*cp == '\\') *cp = '/';
#endif
cp = fname + strlen(fname);
if (cp[-1] == '/') cp--;
while (*fname == '/') {
strcpy (cp, "/.badpixels");
if ((fp = fopen (fname, "r"))) break;
if (cp == fname) break;
while (*--cp != '/');
}
free (fname);
}
if (!fp) return;
while (fgets (line, 128, fp)) {
cp = strchr (line, '#');
if (cp) *cp = 0;
if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
if ((unsigned) col >= width || (unsigned) row >= height) continue;
if (time > timestamp) continue;
for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
for (r = row-rad; r <= row+rad; r++)
for (c = col-rad; c <= col+rad; c++)
if ((unsigned) r < height && (unsigned) c < width &&
(r != row || c != col) && fcol(r,c) == fcol(row,col)) {
tot += BAYER2(r,c);
n++;
}
BAYER2(row,col) = tot/n;
if (verbose) {
if (!fixed++)
fprintf (stderr,_("Fixed dead pixels at:"));
fprintf (stderr, " %d,%d", col, row);
}
}
if (fixed) fputc ('\n', stderr);
fclose (fp);
}
void CLASS subtract (const char *fname)
{
FILE *fp;
int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
ushort *pixel;
if (!(fp = fopen (fname, "rb"))) {
perror (fname); return;
}
if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
if (c == '#') comment = 1;
if (c == '\n') comment = 0;
if (comment) continue;
if (isdigit(c)) number = 1;
if (number) {
if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
else if (isspace(c)) {
number = 0; nd++;
} else error = 1;
}
}
if (error || nd < 3) {
fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
fclose (fp); return;
} else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
fclose (fp); return;
}
pixel = (ushort *) calloc (width, sizeof *pixel);
merror (pixel, "subtract()");
for (row=0; row < height; row++) {
fread (pixel, 2, width, fp);
for (col=0; col < width; col++)
BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0);
}
free (pixel);
fclose (fp);
memset (cblack, 0, sizeof cblack);
black = 0;
}
void CLASS gamma_curve (double pwr, double ts, int mode, int imax)
{
int i;
double g[6], bnd[2]={0,0}, r;
g[0] = pwr;
g[1] = ts;
g[2] = g[3] = g[4] = 0;
bnd[g[1] >= 1] = 1;
if (g[1] && (g[1]-1)*(g[0]-1) <= 0) {
for (i=0; i < 48; i++) {
g[2] = (bnd[0] + bnd[1])/2;
if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2];
else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2];
}
g[3] = g[2] / g[1];
if (g[0]) g[4] = g[2] * (1/g[0] - 1);
}
if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) +
(1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1;
else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1
- g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1;
if (!mode--) {
memcpy (gamm, g, sizeof gamm);
return;
}
for (i=0; i < 0x10000; i++) {
curve[i] = 0xffff;
if ((r = (double) i / imax) < 1)
curve[i] = 0x10000 * ( mode
? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1))
: (r < g[2] ? r/g[1] : (g[0] ? pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2]))));
}
}
void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size)
{
double work[3][6], num;
int i, j, k;
for (i=0; i < 3; i++) {
for (j=0; j < 6; j++)
work[i][j] = j == i+3;
for (j=0; j < 3; j++)
for (k=0; k < size; k++)
work[i][j] += in[k][i] * in[k][j];
}
for (i=0; i < 3; i++) {
num = work[i][i];
for (j=0; j < 6; j++)
work[i][j] /= num;
for (k=0; k < 3; k++) {
if (k==i) continue;
num = work[k][i];
for (j=0; j < 6; j++)
work[k][j] -= work[i][j] * num;
}
}
for (i=0; i < size; i++)
for (j=0; j < 3; j++)
for (out[i][j]=k=0; k < 3; k++)
out[i][j] += work[j][k+3] * in[i][k];
}
void CLASS cam_xyz_coeff (float rgb_cam[3][4], double cam_xyz[4][3])
{
double cam_rgb[4][3], inverse[4][3], num;
int i, j, k;
for (i=0; i < colors; i++) /* Multiply out XYZ colorspace */
for (j=0; j < 3; j++)
for (cam_rgb[i][j] = k=0; k < 3; k++)
cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];
for (i=0; i < colors; i++) { /* Normalize cam_rgb so that */
for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */
num += cam_rgb[i][j];
for (j=0; j < 3; j++)
cam_rgb[i][j] /= num;
pre_mul[i] = 1 / num;
}
pseudoinverse (cam_rgb, inverse, colors);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
rgb_cam[i][j] = inverse[j][i];
}
#ifdef COLORCHECK
void CLASS colorcheck()
{
#define NSQ 24
// Coordinates of the GretagMacbeth ColorChecker squares
// width, height, 1st_column, 1st_row
int cut[NSQ][4]; // you must set these
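  // Hypothetical example: each entry is { width, height, 1st_column, 1st_row },
  // so cut[0] = { 40, 40, 100, 120 } would average a 40x40 patch whose
  // top-left corner sits at column 100, row 120 of the raw image.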
// ColorChecker Chart under 6500-kelvin illumination
static const double gmb_xyY[NSQ][3] = {
{ 0.400, 0.350, 10.1 }, // Dark Skin
{ 0.377, 0.345, 35.8 }, // Light Skin
{ 0.247, 0.251, 19.3 }, // Blue Sky
{ 0.337, 0.422, 13.3 }, // Foliage
{ 0.265, 0.240, 24.3 }, // Blue Flower
{ 0.261, 0.343, 43.1 }, // Bluish Green
{ 0.506, 0.407, 30.1 }, // Orange
{ 0.211, 0.175, 12.0 }, // Purplish Blue
{ 0.453, 0.306, 19.8 }, // Moderate Red
{ 0.285, 0.202, 6.6 }, // Purple
{ 0.380, 0.489, 44.3 }, // Yellow Green
{ 0.473, 0.438, 43.1 }, // Orange Yellow
{ 0.187, 0.129, 6.1 }, // Blue
{ 0.305, 0.478, 23.4 }, // Green
{ 0.539, 0.313, 12.0 }, // Red
{ 0.448, 0.470, 59.1 }, // Yellow
{ 0.364, 0.233, 19.8 }, // Magenta
{ 0.196, 0.252, 19.8 }, // Cyan
{ 0.310, 0.316, 90.0 }, // White
{ 0.310, 0.316, 59.1 }, // Neutral 8
{ 0.310, 0.316, 36.2 }, // Neutral 6.5
{ 0.310, 0.316, 19.8 }, // Neutral 5
{ 0.310, 0.316, 9.0 }, // Neutral 3.5
{ 0.310, 0.316, 3.1 } }; // Black
double gmb_cam[NSQ][4], gmb_xyz[NSQ][3];
double inverse[NSQ][3], cam_xyz[4][3], balance[4], num;
int c, i, j, k, sq, row, col, pass, count[4];
memset (gmb_cam, 0, sizeof gmb_cam);
for (sq=0; sq < NSQ; sq++) {
FORCC count[c] = 0;
for (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++)
for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) {
c = FC(row,col);
if (c >= colors) c -= 2;
gmb_cam[sq][c] += BAYER2(row,col);
BAYER2(row,col) = black + (BAYER2(row,col)-black)/2;
count[c]++;
}
FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black;
gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1];
gmb_xyz[sq][1] = gmb_xyY[sq][2];
gmb_xyz[sq][2] = gmb_xyY[sq][2] *
(1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1];
}
pseudoinverse (gmb_xyz, inverse, NSQ);
for (pass=0; pass < 2; pass++) {
for (raw_color = i=0; i < colors; i++)
for (j=0; j < 3; j++)
for (cam_xyz[i][j] = k=0; k < NSQ; k++)
cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j];
cam_xyz_coeff (rgb_cam, cam_xyz);
FORCC balance[c] = pre_mul[c] * gmb_cam[20][c];
for (sq=0; sq < NSQ; sq++)
FORCC gmb_cam[sq][c] *= balance[c];
}
if (verbose) {
printf (" { \"%s %s\", %d,\n\t{", make, model, black);
num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]);
FORCC for (j=0; j < 3; j++)
printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5));
puts (" } },");
}
#undef NSQ
}
#endif
void CLASS hat_transform (float *temp, float *base, int st, int size, int sc)
{
int i;
for (i=0; i < sc; i++)
temp[i] = 2*base[st*i] + base[st*(sc-i)] + base[st*(i+sc)];
for (; i+sc < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(i+sc)];
for (; i < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(2*size-2-(i+sc))];
}
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
#if defined(__sun) && !defined(__GNUC__) /* Fix UFRaw bug #3205673 - NKBJ */
#pragma omp parallel for default(shared) \
private(c,i,hpass,lev,lpass,row,col,thold,fimg,temp)
#else
#pragma omp parallel for default(shared) \
private(c,i,hpass,lev,lpass,row,col,thold,fimg,temp)
#endif
FORC(nc) { /* denoise R,G1,B,G3 individually */
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt(image[i][c] << scale);
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++) {
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt(BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
void CLASS scale_colors()
{
unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8];
int val, dark, sat;
double dsum[8], dmin, dmax;
float scale_mul[4], fr, fc;
ushort *img=0, *pix;
if (user_mul[0])
memcpy (pre_mul, user_mul, sizeof pre_mul);
if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) {
memset (dsum, 0, sizeof dsum);
bottom = MIN (greybox[1]+greybox[3], height);
right = MIN (greybox[0]+greybox[2], width);
for (row=greybox[1]; row < bottom; row += 8)
for (col=greybox[0]; col < right; col += 8) {
memset (sum, 0, sizeof sum);
for (y=row; y < row+8 && y < bottom; y++)
for (x=col; x < col+8 && x < right; x++)
FORC4 {
if (filters) {
c = fcol(y,x);
val = BAYER2(y,x);
} else
val = image[y*width+x][c];
if (val > maximum-25) goto skip_block;
if ((val -= cblack[c]) < 0) val = 0;
sum[c] += val;
sum[c+4]++;
if (filters) break;
}
FORC(8) dsum[c] += sum[c];
skip_block: ;
}
FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c];
}
if (use_camera_wb && cam_mul[0] != -1) {
memset (sum, 0, sizeof sum);
for (row=0; row < 8; row++)
for (col=0; col < 8; col++) {
c = FC(row,col);
if ((val = white[row][col] - cblack[c]) > 0)
sum[c] += val;
sum[c+4]++;
}
if (sum[0] && sum[1] && sum[2] && sum[3])
FORC4 pre_mul[c] = (float) sum[c+4] / sum[c];
else if (cam_mul[0] && cam_mul[2])
memcpy (pre_mul, cam_mul, sizeof pre_mul);
else
fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname);
}
if (pre_mul[1] == 0) pre_mul[1] = 1;
if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1;
dark = black;
sat = maximum;
if (threshold) wavelet_denoise();
maximum -= black;
for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) {
if (dmin > pre_mul[c])
dmin = pre_mul[c];
if (dmax < pre_mul[c])
dmax = pre_mul[c];
}
if (!highlight) dmax = dmin;
FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum;
if (verbose) {
fprintf (stderr,
_("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat);
FORC4 fprintf (stderr, " %f", pre_mul[c]);
fputc ('\n', stderr);
}
if (filters > 1000 && (cblack[4]+1)/2 == 1 && (cblack[5]+1)/2 == 1) {
FORC4 cblack[FC(c/2,c%2)] +=
cblack[6 + c/2 % cblack[4] * cblack[5] + c%2 % cblack[5]];
cblack[4] = cblack[5] = 0;
}
size = iheight*iwidth;
for (i=0; i < size*4; i++) {
if (!(val = ((ushort *)image)[i])) continue;
if (cblack[4] && cblack[5])
val -= cblack[6 + i/4 / iwidth % cblack[4] * cblack[5] +
i/4 % iwidth % cblack[5]];
val -= cblack[i & 3];
val *= scale_mul[i & 3];
((ushort *)image)[i] = CLIP(val);
}
if ((aber[0] != 1 || aber[2] != 1) && colors == 3) {
if (verbose)
fprintf (stderr,_("Correcting chromatic aberration...\n"));
for (c=0; c < 4; c+=2) {
if (aber[c] == 1) continue;
img = (ushort *) malloc (size * sizeof *img);
merror (img, "scale_colors()");
for (i=0; i < size; i++)
img[i] = image[i][c];
for (row=0; row < iheight; row++) {
ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5;
if (ur > iheight-2) continue;
fr -= ur;
for (col=0; col < iwidth; col++) {
uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5;
if (uc > iwidth-2) continue;
fc -= uc;
pix = img + ur*iwidth + uc;
image[row*iwidth+col][c] =
(pix[ 0]*(1-fc) + pix[ 1]*fc) * (1-fr) +
(pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr;
}
}
free(img);
}
}
}
void CLASS pre_interpolate()
{
ushort (*img)[4];
int row, col, c;
if (shrink) {
if (half_size) {
height = iheight;
width = iwidth;
if (filters == 9) {
for (row=0; row < 3; row++)
for (col=1; col < 4; col++)
if (!(image[row*width+col][0] | image[row*width+col][2]))
goto break2; break2:
for ( ; row < height; row+=3)
for (col=(col-1)%3+1; col < width-1; col+=3) {
img = image + row*width+col;
for (c=0; c < 3; c+=2)
img[0][c] = (img[-1][c] + img[1][c]) >> 1;
}
}
} else {
img = (ushort (*)[4]) calloc (height, width*sizeof *img);
merror (img, "pre_interpolate()");
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
c = fcol(row,col);
img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c];
}
free (image);
image = img;
shrink = 0;
}
}
if (filters > 1000 && colors == 3) {
mix_green = four_color_rgb ^ half_size;
if (four_color_rgb | half_size) colors++;
else {
for (row = FC(1,0) >> 1; row < height; row+=2)
for (col = FC(row,1) & 1; col < width; col+=2)
image[row*width+col][1] = image[row*width+col][3];
filters &= ~((filters & 0x55555555) << 1);
}
}
if (half_size) filters = 0;
}
void CLASS border_interpolate (int border)
{
int row;
unsigned col, y, x, f, c, sum[8];
#pragma omp for
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
if (col==border && row >= border && row < height-border)
col = width-border;
memset (sum, 0, sizeof sum);
for (y=row-1; y != row+2; y++)
for (x=col-1; x != col+2; x++)
if (y < height && x < width) {
f = fcol(y,x);
sum[f] += image[y*width+x][f];
sum[f+4]++;
}
f = fcol(row,col);
FORCC if (c != f && sum[c+4])
image[row*width+col][c] = sum[c] / sum[c+4];
}
}
void CLASS lin_interpolate()
{
int code[16][16][32], size=16, *ip, sum[4];
int f, c, i, x, y, row, col, shift, color;
ushort *pix;
if (verbose) fprintf (stderr,_("Bilinear interpolation...\n"));
if (filters == 9) size = 6;
#pragma omp parallel default(shared) \
private(ip, sum, f, c, i, x, y, row, col, shift, color, pix)
{
border_interpolate(1);
#pragma omp for
for (row=0; row < size; row++)
for (col=0; col < size; col++) {
ip = code[row][col]+1;
f = fcol(row,col);
memset (sum, 0, sizeof sum);
for (y=-1; y <= 1; y++)
for (x=-1; x <= 1; x++) {
shift = (y==0) + (x==0);
color = fcol(row+y,col+x);
if (color == f) continue;
*ip++ = (width*y + x)*4 + color;
*ip++ = shift;
*ip++ = color;
sum[color] += 1 << shift;
}
code[row][col][0] = (ip - code[row][col]) / 3;
FORCC
if (c != f) {
*ip++ = c;
*ip++ = 256 / sum[c];
}
}
#pragma omp for
for (row=1; row < height-1; row++)
for (col=1; col < width-1; col++) {
pix = image[row*width+col];
ip = code[row % size][col % size];
memset (sum, 0, sizeof sum);
for (i=*ip++; i--; ip+=3)
sum[ip[2]] += pix[ip[0]] << ip[1];
for (i=colors; --i; ip+=2)
pix[ip[0]] = sum[ip[0]] * ip[1] >> 8;
}
} /* pragma omp parallel */
}
/*
This algorithm is officially called:
"Interpolation using a Threshold-based variable number of gradients"
described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html
I've extended the basic idea to work with non-Bayer filter arrays.
Gradients are numbered clockwise from NW=0 to W=7.
*/
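/* For reference, the clockwise numbering stated above lays out as:

	NW=0   N=1   NE=2
	 W=7          E=3
	SW=6   S=5   SE=4
 */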
void CLASS vng_interpolate()
{
static const signed char *cp, terms[] = {
-2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01,
-2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01,
-2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03,
-2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06,
-2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04,
-1,-2,-1,+0,0,0x80, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01,
-1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,0x88, -1,-1,+1,-2,0,0x40,
-1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11,
-1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11,
-1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22,
-1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44,
-1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10,
-1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04,
+0,-2,+0,+0,1,0x80, +0,-1,+0,+1,1,0x88, +0,-1,+1,-2,0,0x40,
+0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20,
+0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08,
+0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20,
+0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44,
+0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60,
+0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,0x80,
+1,-1,+1,+1,0,0x88, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40,
+1,+0,+2,+1,0,0x10
}, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 };
ushort (*brow[5])[4], *pix;
int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4];
int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag;
int g, diff, thold, num, c;
lin_interpolate();
if (verbose) fprintf (stderr,_("VNG interpolation...\n"));
if (filters == 1) prow = pcol = 16;
if (filters == 9) prow = pcol = 6;
ip = (int *) calloc (prow*pcol, 1280);
merror (ip, "vng_interpolate()");
for (row=0; row < prow; row++) /* Precalculate for VNG */
for (col=0; col < pcol; col++) {
code[row][col] = ip;
for (cp=terms, t=0; t < 64; t++) {
y1 = *cp++; x1 = *cp++;
y2 = *cp++; x2 = *cp++;
weight = *cp++;
grads = *cp++;
color = fcol(row+y1,col+x1);
if (fcol(row+y2,col+x2) != color) continue;
diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 2:1;
if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue;
*ip++ = (y1*width + x1)*4 + color;
*ip++ = (y2*width + x2)*4 + color;
*ip++ = weight;
for (g=0; g < 8; g++)
if (grads & 1<<g) *ip++ = g;
*ip++ = -1;
}
*ip++ = INT_MAX;
for (cp=chood, g=0; g < 8; g++) {
y = *cp++; x = *cp++;
*ip++ = (y*width + x) * 4;
color = fcol(row,col);
if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color)
*ip++ = (y*width + x) * 8 + color;
else
*ip++ = 0;
}
}
#pragma omp parallel default(shared) \
private(row,col,g,brow,pix,ip,gval,diff,gmin,gmax,thold,sum,color,num,c,t)
{
brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow);
merror (brow[4], "vng_interpolate()");
/* Do not use 'pragma omp for' here */
for (row=0; row < 3; row++)
brow[row] = brow[4] + row*width;
int slice_row = 0;
#pragma omp for schedule(static) nowait
for (row=2; row < height-2; row++) { /* Do VNG interpolation */
for (col=2; col < width-2; col++) {
pix = image[row*width+col];
ip = code[row % prow][col % pcol];
memset (gval, 0, sizeof gval);
while ((g = ip[0]) != INT_MAX) { /* Calculate gradients */
diff = ABS(pix[g] - pix[ip[1]]) << ip[2];
gval[ip[3]] += diff;
ip += 5;
if ((g = ip[-1]) == -1) continue;
gval[g] += diff;
while ((g = *ip++) != -1)
gval[g] += diff;
}
ip++;
gmin = gmax = gval[0]; /* Choose a threshold */
for (g=1; g < 8; g++) {
if (gmin > gval[g]) gmin = gval[g];
if (gmax < gval[g]) gmax = gval[g];
}
if (gmax == 0) {
memcpy (brow[2][col], pix, sizeof *image);
continue;
}
thold = gmin + (gmax >> 1);
memset (sum, 0, sizeof sum);
color = fcol(row,col);
for (num=g=0; g < 8; g++,ip+=2) { /* Average the neighbors */
if (gval[g] <= thold) {
FORCC
if (c == color && ip[1])
sum[c] += (pix[c] + pix[ip[1]]) >> 1;
else
sum[c] += pix[ip[0] + c];
num++;
}
}
FORCC { /* Save to buffer */
t = pix[color];
if (c != color)
t += (sum[c] - sum[color]) / num;
brow[2][col][c] = CLIP(t);
}
}
if (slice_row > 1) /* Write buffer to image */
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
for (g=0; g < 4; g++)
brow[(g-1) & 3] = brow[g];
slice_row++;
}
if (row == height - 2) {
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image);
}
free (brow[4]);
} /* pragma omp parallel */
free (code[0][0]);
}
/*
Patterned Pixel Grouping Interpolation by Alain Desbiolles
*/
void CLASS ppg_interpolate()
{
int dir[5] = { 1, width, -1, -width, 1 };
int row, col, diff[2], guess[2], c, d, i;
ushort (*pix)[4];
if (verbose) fprintf (stderr,_("PPG interpolation...\n"));
#pragma omp parallel default(shared) private(row,col,i,d,c,pix,diff,guess)
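/* diff[] is written inside the worksharing loops below, so each thread needs
its own copy. */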
{
border_interpolate(3);
/* Fill in the green layer with gradients and pattern recognition: */
#pragma omp for
for (row=3; row < height-3; row++)
for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; i++) {
guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2
- pix[-2*d][c] - pix[2*d][c];
diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) +
ABS(pix[ 2*d][c] - pix[ 0][c]) +
ABS(pix[ -d][1] - pix[ d][1]) ) * 3 +
( ABS(pix[ 3*d][1] - pix[ d][1]) +
ABS(pix[-3*d][1] - pix[-d][1]) ) * 2;
}
d = dir[i = diff[0] > diff[1]];
pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]);
}
/* Calculate red and blue for each green pixel: */
#pragma omp for
for (row=1; row < height-1; row++)
for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; c=2-c, i++)
pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1]) >> 1);
}
/* Calculate blue for red pixels and vice versa: */
#pragma omp for
for (row=1; row < height-1; row++)
for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) {
diff[i] = ABS(pix[-d][c] - pix[d][c]) +
ABS(pix[-d][1] - pix[0][1]) +
ABS(pix[ d][1] - pix[0][1]);
guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1];
}
if (diff[0] != diff[1])
pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1);
else
pix[0][c] = CLIP((guess[0]+guess[1]) >> 2);
}
} /* pragma omp parallel */
}
void CLASS cielab (ushort rgb[3], short lab[3])
{
int c, i, j, k;
float r, xyz[3];
static float cbrt[0x10000], xyz_cam[3][4];
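/* A call with rgb == NULL precomputes the cube-root lookup table and the
camera-to-XYZ matrix; later calls convert one pixel to a fixed-point CIELab
triple. */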
if (!rgb) {
#pragma omp for
for (i=0; i < 0x10000; i++) {
r = i / 65535.0;
cbrt[i] = r > 0.008856 ? pow(r,1/3.0) : 7.787*r + 16/116.0;
}
#pragma omp for
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (xyz_cam[i][j] = k=0; k < 3; k++)
#pragma omp atomic
xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i];
return;
}
xyz[0] = xyz[1] = xyz[2] = 0.5;
FORCC {
xyz[0] += xyz_cam[0][c] * rgb[c];
xyz[1] += xyz_cam[1][c] * rgb[c];
xyz[2] += xyz_cam[2][c] * rgb[c];
}
xyz[0] = cbrt[CLIP((int) xyz[0])];
xyz[1] = cbrt[CLIP((int) xyz[1])];
xyz[2] = cbrt[CLIP((int) xyz[2])];
lab[0] = 64 * (116 * xyz[1] - 16);
lab[1] = 64 * 500 * (xyz[0] - xyz[1]);
lab[2] = 64 * 200 * (xyz[1] - xyz[2]);
}
#define TS 512 /* Tile Size */
#define fcol(row,col) xtrans[(row+6) % 6][(col+6) % 6]
#ifndef _OPENMP
// Makes a 10% difference in performance in the sequential version, but none at
// all with OpenMP...
// And STATIC_BUFFER is delicate to manage.
#define STATIC_BUFFER
#define FIXED_NDIR
#endif
/* Allow each tile to be calculated independently of the others.
Explanation: the border pixels of each tile are computed from the border
pixels of the neighbouring tiles.
Going from top to bottom:
- the first pixels of each tile depend on the last ones of the previous tile
- the last ones depend on the first ones of the next tile
In sequential mode, the first pixels use already-calculated values from the
previous tile and fresh (untouched) values from the next tile.
In OpenMP / parallel mode, the first pixels of the next tile might already
have been calculated by another thread.
STRICT_IMAGE guarantees that fresh values are used, always.
*/
#define STRICT_IMAGE
/*
Frank Markesteijn's algorithm for Fuji X-Trans sensors
*/
void CLASS xtrans_interpolate (int passes)
{
int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
int val, pass, hm[8], avg[4], color[3][8];
#ifdef FIXED_NDIR
const int ndir = 4 << 1;
#else
int ndir = 4 << (passes > 1);
#endif
static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 },
patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 },
{ 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } },
dir[4] = { 1,TS,TS+1,TS-1 };
short allhex[3][3][2][8], *hex;
ushort min, max, sgrow, sgcol;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab) [TS][3], (*lix)[3];
float (*drv)[TS][TS], diff[6], tr;
char (*homo)[TS][TS];
#ifdef STATIC_BUFFER
static char buffer [TS*TS*(8*11+6)] __attribute__((aligned(64)));
#else
char *buffer;
#endif
if (verbose)
fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes);
if ((4 << (passes > 1)) != ndir)
fprintf (stderr,_("4 << (passes > 1) = %d != ndir = %d\n"),
(4 << (passes > 1)), ndir);
#pragma omp parallel default (shared) \
private (top, left, mrow, mcol, row, col, color, pix, hex, c, pass, rix, \
val, h, f, d, g, tr, v, hm, max, avg, lix, diff, i, \
buffer, rgb, lab, drv, homo, ng, min)
{
cielab(0,0);
border_interpolate(6);
/* Map a green hexagon around each non-green pixel and vice versa: */
#pragma omp for
for (row=0; row < 3; row++)
for (col=0; col < 3; col++)
for (ng=d=0; d < 10; d+=2) {
g = fcol(row,col) == 1;
if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++;
if (ng == 4) { sgrow = row; sgcol = col; }
if (ng == g+1) FORC(8) {
v = orth[d ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1];
h = orth[d+2]*patt[g][c*2] + orth[d+3]*patt[g][c*2+1];
allhex[row][col][0][c^(g*2 & d)] = h + v*width;
allhex[row][col][1][c^(g*2 & d)] = h + v*TS;
}
}
/* Set green1 and green3 to the minimum and maximum allowed values: */
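/* This pass walks the image in a data-dependent order (row and col are
adjusted inside the loop body), so it stays on a single thread: */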
#pragma omp single
for (row=2; row < height-2; row++)
for (min=~(max=0), col=2; col < width-2; col++) {
if (fcol(row,col) == 1 && (min=~(max=0))) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
if (!max) FORC(6) {
val = pix[hex[c]][1];
if (min > val) min = val;
if (max < val) max = val;
}
pix[0][1] = min;
pix[0][3] = max;
switch ((row-sgrow) % 3) {
case 1: if (row < height-3) { row++; col--; } break;
case 2: if ((min=~(max=0)) && (col+=2) < width-3 && row > 2) row--;
}
}
#ifdef STRICT_IMAGE
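/* Per-thread snapshot of the mosaic, so every tile reads original,
unmodified neighbour pixels (see the STRICT_IMAGE note above). */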
ushort (*working_image)[4];
working_image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (working_image, "xtrans_interpolate working_image");
memcpy (working_image, image, iheight * iwidth * sizeof *image);
#endif
#ifndef STATIC_BUFFER
buffer = (char *) malloc (TS*TS*(ndir*11+6));
merror (buffer, "xtrans_interpolate()");
#endif
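/* Buffer layout per pixel of a TS x TS tile: ndir RGB planes (6 bytes each),
one CIELab plane (6 bytes), ndir derivative planes (4 bytes each) and ndir
homogeneity planes (1 byte each) -- TS*TS*(ndir*11+6) bytes in total. */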
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*) [TS][3])(buffer + TS*TS*(ndir*6));
drv = (float (*)[TS][TS]) (buffer + TS*TS*(ndir*6+6));
homo = (char (*)[TS][TS]) (buffer + TS*TS*(ndir*10+6));
#pragma omp for
for (top=3; top < height-19; top += TS-16)
for (left=3; left < width-19; left += TS-16) {
mrow = MIN (top+TS, height-3);
mcol = MIN (left+TS, width-3);
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++)
#ifdef STRICT_IMAGE
memcpy (rgb[0][row-top][col-left], working_image[row*width+col], 6);
#else
memcpy (rgb[0][row-top][col-left], image[row*width+col], 6);
#endif
FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb);
/* Interpolate green horizontally, vertically, and along both diagonals: */
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++) {
if ((f = fcol(row,col)) == 1) continue;
#ifdef STRICT_IMAGE
pix = working_image + row*width + col;
#else
pix = image + row*width + col;
#endif
hex = allhex[row % 3][col % 3][0];
color[1][0] = 174 * (pix[ hex[1]][1] + pix[ hex[0]][1]) -
46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]);
color[1][1] = 223 * pix[ hex[3]][1] + pix[ hex[2]][1] * 33 +
92 * (pix[ 0 ][f] - pix[ -hex[2]][f]);
FORC(2) color[1][2+c] =
164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 *
(2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]);
FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] =
LIM(color[1][c] >> 8,pix[0][1],pix[0][3]);
}
for (pass=0; pass < passes; pass++) {
if (pass == 1)
memcpy (rgb+=4, buffer, 4*sizeof *rgb);
/* Recalculate green from interpolated values of closer pixels: */
if (pass) {
for (row=top+2; row < mrow-2; row++)
for (col=left+2; col < mcol-2; col++) {
if ((f = fcol(row,col)) == 1) continue;
#ifdef STRICT_IMAGE
pix = working_image + row*width + col;
#else
pix = image + row*width + col;
#endif
hex = allhex[row % 3][col % 3][1];
for (d=3; d < 6; d++) {
rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left];
val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1]
- rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f];
rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]);
}
}
}
/* Interpolate red and blue values for solitary green pixels: */
for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3)
for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) {
rix = &rgb[0][row-top][col-left];
h = fcol(row,col+1);
memset (diff, 0, sizeof diff);
for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) {
for (c=0; c < 2; c++, h^=2) {
g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1];
color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h];
if (d > 1)
diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1]
- rix[i<<c][h] + rix[-i<<c][h]) + SQR(g);
}
if (d > 1 && (d & 1))
if (diff[d-1] < diff[d])
FORC(2) color[c*2][d] = color[c*2][d-1];
if (d < 2 || (d & 1)) {
FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2);
rix += TS*TS;
}
}
}
/* Interpolate red for blue pixels and vice versa: */
for (row=top+3; row < mrow-3; row++)
for (col=left+3; col < mcol-3; col++) {
if ((f = 2-fcol(row,col)) == 1) continue;
rix = &rgb[0][row-top][col-left];
c = (row-sgrow) % 3 ? TS:1;
h = 3 * (c ^ TS ^ 1);
for (d=0; d < 4; d++, rix += TS*TS) {
i = d > 1 || ((d ^ c) & 1) ||
((ABS(rix[0][1]-rix[c][1])+ABS(rix[0][1]-rix[-c][1])) <
2*(ABS(rix[0][1]-rix[h][1])+ABS(rix[0][1]-rix[-h][1]))) ? c:h;
rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
}
}
/* Fill in red and blue for 2x2 blocks of green: */
for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3)
for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) {
rix = &rgb[0][row-top][col-left];
hex = allhex[row % 3][col % 3][1];
for (d=0; d < ndir; d+=2, rix += TS*TS)
if (hex[d] + hex[d+1]) {
g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
} else {
g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
}
}
}
rgb = (ushort(*)[TS][TS][3]) buffer;
mrow -= top;
mcol -= left;
/* Convert to CIELab and differentiate in all directions: */
for (d=0; d < ndir; d++) {
for (row=2; row < mrow-2; row++)
for (col=2; col < mcol-2; col++)
cielab (rgb[d][row][col], lab[row][col]);
for (f=dir[d & 3],row=3; row < mrow-3; row++)
for (col=3; col < mcol-3; col++) {
lix = &lab[row][col];
g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
drv[d][row][col] = SQR(g)
+ SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
+ SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
}
}
/* Build homogeneity maps from the derivatives: */
memset(homo, 0, ndir*TS*TS);
for (row=4; row < mrow-4; row++)
for (col=4; col < mcol-4; col++) {
for (tr=FLT_MAX, d=0; d < ndir; d++)
tr = MIN(tr, (drv[d][row][col]));
tr *= 8;
for (d=0; d < ndir; d++)
for (v=-1; v <= 1; v++)
for (h=-1; h <= 1; h++)
homo[d][row][col] += (drv[d][row+v][col+h] <= tr);
}
/* Average the most homogenous pixels for the final result: */
if (height-top < TS+4) mrow = height-top+2;
if (width-left < TS+4) mcol = width-left+2;
for (row = MIN(top,8); row < mrow-8; row++)
for (col = MIN(left,8); col < mcol-8; col++) {
for (d=0; d < ndir; d++)
for (hm[d]=0, v=-2; v <= 2; v++)
hm[d] += homo[d][row+v][col-2] +
homo[d][row+v][col-1] +
homo[d][row+v][col+0] +
homo[d][row+v][col+1] +
homo[d][row+v][col+2];
for (d=0; d < ndir-4; d++)
if (hm[d] < hm[d+4]) hm[d ] = 0; else
if (hm[d] > hm[d+4]) hm[d+4] = 0;
for (max=hm[0],d=1; d < ndir; d++)
if (max < hm[d]) max = hm[d];
max -= max >> 3;
memset (avg, 0, sizeof avg);
for (d=0; d < ndir; d++)
if (hm[d] >= max) {
FORC3 {
#pragma omp atomic
avg[c] += rgb[d][row][col][c];
}
avg[3]++;
}
FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
}
}
#ifdef STRICT_IMAGE
free (working_image);
#endif
#ifndef STATIC_BUFFER
free(buffer);
#endif
border_interpolate(8);
} /* pragma omp parallel */
}
#undef fcol
/*
Adaptive Homogeneity-Directed interpolation is based on
the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
*/
void CLASS ahd_interpolate()
{
int i, j, top, left, row, col, tr, tc, c, d, val, hm[2];
static const int dir[4] = { -1, 1, -TS, TS };
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab)[TS][TS][3], (*lix)[3];
char (*homo)[TS][TS], *buffer;
if (verbose) fprintf (stderr,_("AHD interpolation...\n"));
#pragma omp parallel default(shared) \
private(top,left,row,col,pix,rix,lix,c,val,d,tc,tr,i,j,ldiff,abdiff,leps, \
abeps,hm,buffer,rgb,lab,homo)
{
cielab (0,0);
border_interpolate(5);
buffer = (char *) malloc (26*TS*TS);
merror (buffer, "ahd_interpolate()");
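/* 26*TS*TS bytes: two TS x TS RGB tiles (12*TS*TS), two CIELab tiles
(12*TS*TS) and two homogeneity maps (2*TS*TS). */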
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][TS]) (buffer + 24*TS*TS);
#pragma omp for
for (top=2; top < height-5; top += TS-6)
for (left=2; left < width-5; left += TS-6) {
/* Interpolate green horizontally and vertically: */
for (row=top; row < top+TS && row < height-2; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < left+TS && col < width-2; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
/* Interpolate red and blue, and convert to CIELab: */
for (d=0; d < 2; d++)
for (row=top+1; row < top+TS-1 && row < height-3; row++)
for (col=left+1; col < left+TS-1 && col < width-3; col++) {
pix = image + row*width+col;
rix = &rgb[d][row-top][col-left];
lix = &lab[d][row-top][col-left];
if ((c = 2 - FC(row,col)) == 1) {
c = FC(row+1,col);
val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][2-c] = CLIP(val);
val = pix[0][1] + (( pix[-width][c] + pix[width][c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else
val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c]
+ pix[+width-1][c] + pix[+width+1][c]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab (rix[0],lix[0]);
}
/* Build homogeneity maps from the CIELab images: */
memset (homo, 0, 2*TS*TS);
for (row=top+2; row < top+TS-2 && row < height-4; row++) {
tr = row-top;
for (col=left+2; col < left+TS-2 && col < width-4; col++) {
tc = col-left;
for (d=0; d < 2; d++) {
lix = &lab[d][tr][tc];
for (i=0; i < 4; i++) {
ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]);
abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1])
+ SQR(lix[0][2]-lix[dir[i]][2]);
}
}
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (d=0; d < 2; d++)
for (i=0; i < 4; i++)
if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps)
homo[d][tr][tc]++;
}
}
/* Combine the most homogenous pixels for the final result: */
for (row=top+3; row < top+TS-3 && row < height-5; row++) {
tr = row-top;
for (col=left+3; col < left+TS-3 && col < width-5; col++) {
tc = col-left;
for (d=0; d < 2; d++)
for (hm[d]=0, i=tr-1; i <= tr+1; i++)
for (j=tc-1; j <= tc+1; j++)
hm[d] += homo[d][i][j];
if (hm[0] != hm[1])
FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c];
else
FORC3 image[row*width+col][c] =
(rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1;
}
}
}
free (buffer);
} /* pragma omp parallel */
}
#undef TS
void CLASS median_filter()
{
ushort (*pix)[4];
int pass, c, i, j, k, med[9];
static const uchar opt[] = /* Optimal 9-element median search */
{ 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8,
0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 };
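/* Each pass works on the red-minus-green and blue-minus-green differences:
the 3x3 median of the difference is added back to green, smoothing colour
speckles while following luminance edges. */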
for (pass=1; pass <= med_passes; pass++) {
if (verbose)
fprintf (stderr,_("Median filter pass %d...\n"), pass);
for (c=0; c < 3; c+=2) {
for (pix = image; pix < image+width*height; pix++)
pix[0][3] = pix[0][c];
for (pix = image+width; pix < image+width*(height-1); pix++) {
if ((pix-image+1) % width < 2) continue;
for (k=0, i = -width; i <= width; i += width)
for (j = i-1; j <= i+1; j++)
med[k++] = pix[j][3] - pix[j][1];
for (i=0; i < sizeof opt; i+=2)
if (med[opt[i]] > med[opt[i+1]])
SWAP (med[opt[i]] , med[opt[i+1]]);
pix[0][c] = CLIP(med[4] + pix[0][1]);
}
}
}
}
void CLASS blend_highlights()
{
int clip=INT_MAX, row, col, c, i, j;
static const float trans[2][4][4] =
{ { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
static const float itrans[2][4][4] =
{ { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
float cam[2][4], lab[2][4], sum[2], chratio;
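/* For pixels with at least one clipped channel: transform both the raw and
the clipped-to-'clip' values into a luminance/chrominance basis, scale the
chrominance of the raw value down to that of the clipped one, then
transform back. */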
if ((unsigned) (colors-3) > 1) return;
if (verbose) fprintf (stderr,_("Blending highlights...\n"));
FORCC if (clip > (i = 65535*pre_mul[c])) clip = i;
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
FORCC if (image[row*width+col][c] > clip) break;
if (c == colors) continue;
FORCC {
cam[0][c] = image[row*width+col][c];
cam[1][c] = MIN(cam[0][c],clip);
}
for (i=0; i < 2; i++) {
FORCC for (lab[i][c]=j=0; j < colors; j++)
lab[i][c] += trans[colors-3][c][j] * cam[i][j];
for (sum[i]=0,c=1; c < colors; c++)
sum[i] += SQR(lab[i][c]);
}
chratio = sqrt(sum[1]/sum[0]);
for (c=1; c < colors; c++)
lab[0][c] *= chratio;
FORCC for (cam[0][c]=j=0; j < colors; j++)
cam[0][c] += itrans[colors-3][c][j] * lab[0][j];
FORCC image[row*width+col][c] = cam[0][c] / colors;
}
}
#define SCALE (4 >> shrink)
void CLASS recover_highlights()
{
float *map, sum, wgt, grow;
int hsat[4], count, spread, change, val, i;
unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x;
ushort *pixel;
static const signed char dir[8][2] =
{ {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} };
if (verbose) fprintf (stderr,_("Rebuilding highlights...\n"));
grow = pow (2, 4-highlight);
FORCC hsat[c] = 32000 * pre_mul[c];
for (kc=0, c=1; c < colors; c++)
if (pre_mul[kc] < pre_mul[c]) kc = c;
high = height / SCALE;
wide = width / SCALE;
map = (float *) calloc (high, wide*sizeof *map);
merror (map, "recover_highlights()");
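/* For each non-key channel c, map[] holds (per SCALE x SCALE cell) the ratio
of channel c to the brightest channel kc, measured where c is fully
saturated; empty cells are filled by diffusion from their neighbours, and
clipped pixels are then rebuilt as kc times that ratio. */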
FORCC if (c != kc) {
memset (map, 0, high*wide*sizeof *map);
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
sum = wgt = count = 0;
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) {
sum += pixel[c];
wgt += pixel[kc];
count++;
}
}
if (count == SCALE*SCALE)
map[mrow*wide+mcol] = sum / wgt;
}
for (spread = 32/grow; spread--; ) {
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
if (map[mrow*wide+mcol]) continue;
sum = count = 0;
for (d=0; d < 8; d++) {
y = mrow + dir[d][0];
x = mcol + dir[d][1];
if (y < high && x < wide && map[y*wide+x] > 0) {
sum += (1 + (d & 1)) * map[y*wide+x];
count += 1 + (d & 1);
}
}
if (count > 3)
map[mrow*wide+mcol] = - (sum+grow) / (count+grow);
}
for (change=i=0; i < high*wide; i++)
if (map[i] < 0) {
map[i] = -map[i];
change = 1;
}
if (!change) break;
}
for (i=0; i < high*wide; i++)
if (map[i] == 0) map[i] = 1;
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] > 1) {
val = pixel[kc] * map[mrow*wide+mcol];
if (pixel[c] < val) pixel[c] = CLIP(val);
}
}
}
}
free (map);
}
#undef SCALE
void CLASS tiff_get (unsigned base,
unsigned *tag, unsigned *type, unsigned *len, unsigned *save)
{
*tag = get2();
*type = get2();
*len = get4();
*save = ftell(ifp) + 4;
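/* "11124811248484"[type] is the byte size of each TIFF data type; when the
value does not fit in the inline 4-byte field, it is stored at an offset. */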
if (*len * ("11124811248484"[*type < 14 ? *type:0]-'0') > 4)
fseek (ifp, get4()+base, SEEK_SET);
}
void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen)
{
unsigned entries, tag, type, len, save;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == toff) thumb_offset = get4()+base;
if (tag == tlen) thumb_length = get4();
fseek (ifp, save, SEEK_SET);
}
}
int CLASS parse_tiff_ifd (int base);
void CLASS parse_makernote (int base, int uptag)
{
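/* Substitution tables used to decrypt Nikon's encrypted white-balance data
(tag 0xa7 below). */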
static const uchar xlat[2][256] = {
{ 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d,
0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d,
0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f,
0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f,
0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1,
0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17,
0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89,
0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f,
0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b,
0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb,
0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3,
0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f,
0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35,
0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43,
0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5,
0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 },
{ 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c,
0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34,
0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad,
0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05,
0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee,
0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d,
0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b,
0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b,
0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc,
0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33,
0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8,
0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6,
0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c,
0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49,
0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb,
0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } };
unsigned offset=0, entries, tag, type, len, save, c;
unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0};
uchar buf97[324], ci, cj, ck;
short morder, sorder=order;
char buf[10];
/*
The MakerNote might have its own TIFF header (possibly with
its own byte-order!), or it might just be a table.
*/
if (!strcmp(make,"Nokia")) return;
fread (buf, 1, 10, ifp);
if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */
!strncmp (buf,"VER" ,3) ||
!strncmp (buf,"IIII",4) ||
!strncmp (buf,"MMMM",4)) return;
if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */
!strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */
order = 0x4d4d;
while ((i=ftell(ifp)) < data_offset && i < 16384) {
wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3];
wb[3] = get2();
if (wb[1] == 256 && wb[3] == 256 &&
wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640)
FORC4 cam_mul[c] = wb[c];
}
goto quit;
}
if (!strcmp (buf,"Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek (ifp, offset-8, SEEK_CUR);
} else if (!strcmp (buf,"OLYMPUS") ||
!strcmp (buf,"PENTAX ")) {
base = ftell(ifp)-10;
fseek (ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O') get2();
} else if (!strncmp (buf,"SONY",4) ||
!strcmp (buf,"Panasonic")) {
goto nf;
} else if (!strncmp (buf,"FUJIFILM",8)) {
base = ftell(ifp)-10;
nf: order = 0x4949;
fseek (ifp, 2, SEEK_CUR);
} else if (!strcmp (buf,"OLYMP") ||
!strcmp (buf,"LEICA") ||
!strcmp (buf,"Ricoh") ||
!strcmp (buf,"EPSON"))
fseek (ifp, -2, SEEK_CUR);
else if (!strcmp (buf,"AOC") ||
!strcmp (buf,"QVC"))
fseek (ifp, -4, SEEK_CUR);
else {
fseek (ifp, -10, SEEK_CUR);
if (!strncmp(make,"SAMSUNG",7))
base = ftell(ifp);
}
entries = get2();
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get (base, &tag, &type, &len, &save);
tag |= uptag << 16;
if (tag == 2 && strstr(make,"NIKON") && !iso_speed)
iso_speed = (get2(),get2());
if (tag == 4 && len > 26 && len < 35) {
if ((i=(get4(),get2())) != 0x7fff && !iso_speed)
iso_speed = 50 * pow (2, i/32.0 - 4);
if ((i=(get2(),get2())) != 0x7fff && !aperture)
aperture = pow (2, i/64.0);
if ((i=get2()) != 0xffff && !shutter)
shutter = pow (2, (short) i/-32.0);
wbi = (get2(),get2());
shot_order = (get2(),get2());
}
if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) {
fseek (ifp, tag == 4 ? 140:160, SEEK_CUR);
switch (get2()) {
case 72: flip = 0; break;
case 76: flip = 6; break;
case 82: flip = 5; break;
}
}
if (tag == 7 && type == 2 && len > 20)
fgets (model2, 64, ifp);
if (tag == 8 && type == 4)
shot_order = get4();
if (tag == 9 && !strcmp(make,"Canon"))
fread (artist, 64, 1, ifp);
if (tag == 0xc && len == 4)
FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type);
if (tag == 0xd && type == 7 && get2() == 0xaaaa) {
for (c=i=2; (ushort) c != 0xbbbb && i < len; i++)
c = c << 8 | fgetc(ifp);
while ((i+=4) < len-5)
if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3)
flip = "065"[c]-'0';
}
if (tag == 0x10 && type == 4)
unique_id = get4();
if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) {
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
}
if (tag == 0x14 && type == 7) {
if (len == 2560) {
fseek (ifp, 1248, SEEK_CUR);
goto get2_256;
}
fread (buf, 1, 10, ifp);
if (!strncmp(buf,"NRW ",4)) {
fseek (ifp, strcmp(buf+4,"0100") ? 46:1546, SEEK_CUR);
cam_mul[0] = get4() << 2;
cam_mul[1] = get4() + get4();
cam_mul[2] = get4() << 2;
}
}
if (tag == 0x15 && type == 2 && is_raw)
fread (model, 64, 1, ifp);
if (strstr(make,"PENTAX")) {
if (tag == 0x1b) tag = 0x1018;
if (tag == 0x1c) tag = 0x1017;
}
if (tag == 0x1d)
while ((c = fgetc(ifp)) && c != EOF)
serial = serial*10 + (isdigit(c) ? c - '0' : c % 10);
if (tag == 0x29 && type == 1) {
c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0;
fseek (ifp, 8 + c*32, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4();
}
if (tag == 0x3d && type == 3 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2() >> (14-tiff_bps);
if (tag == 0x81 && type == 4) {
data_offset = get4();
fseek (ifp, data_offset + 41, SEEK_SET);
raw_height = get2() * 2;
raw_width = get2();
filters = 0x61616161;
}
if ((tag == 0x81 && type == 7) ||
(tag == 0x100 && type == 7) ||
(tag == 0x280 && type == 1)) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (tag == 0x88 && type == 4 && (thumb_offset = get4()))
thumb_offset += base;
if (tag == 0x89 && type == 4)
thumb_length = get4();
if (tag == 0x8c || tag == 0x96)
meta_offset = ftell(ifp);
if (tag == 0x97) {
for (i=0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp)-'0';
switch (ver97) {
case 100:
fseek (ifp, 68, SEEK_CUR);
FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
break;
case 102:
fseek (ifp, 6, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
break;
case 103:
fseek (ifp, 16, SEEK_CUR);
FORC4 cam_mul[c] = get2();
}
if (ver97 >= 200) {
if (ver97 != 205) fseek (ifp, 280, SEEK_CUR);
fread (buf97, 324, 1, ifp);
}
}
if (tag == 0xa1 && type == 7) {
order = 0x4949;
fseek (ifp, 140, SEEK_CUR);
FORC3 cam_mul[c] = get4();
}
if (tag == 0xa4 && type == 3) {
fseek (ifp, wbi*48, SEEK_CUR);
FORC3 cam_mul[c] = get2();
}
if (tag == 0xa7 && (unsigned) (ver97-200) < 17) {
ci = xlat[0][serial & 0xff];
cj = xlat[1][fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp)];
ck = 0x60;
for (i=0; i < 324; i++)
buf97[i] ^= (cj += ci * ck++);
i = "66666>666;6A;:;55"[ver97-200] - '0';
FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] =
sget2 (buf97 + (i & -2) + c*2);
}
if (tag == 0x200 && len == 3)
shot_order = (get4(),get4());
if (tag == 0x200 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x201 && len == 4)
FORC4 cam_mul[c ^ (c >> 1)] = get2();
if (tag == 0x220 && type == 7)
meta_offset = ftell(ifp);
if (tag == 0x401 && type == 4 && len == 4)
FORC4 cblack[c ^ c >> 1] = get4();
if (tag == 0xe01) { /* Nikon Capture Note */
order = 0x4949;
fseek (ifp, 22, SEEK_CUR);
for (offset=22; offset+22 < len; offset += 22+i) {
tag = get4();
fseek (ifp, 14, SEEK_CUR);
i = get4()-4;
if (tag == 0x76a43207) flip = get2();
else fseek (ifp, i, SEEK_CUR);
}
}
if (tag == 0xe80 && len == 256 && type == 7) {
fseek (ifp, 48, SEEK_CUR);
cam_mul[0] = get2() * 508 * 1.078 / 0x10000;
cam_mul[2] = get2() * 382 * 1.173 / 0x10000;
}
if (tag == 0xf00 && type == 7) {
if (len == 614)
fseek (ifp, 176, SEEK_CUR);
else if (len == 734 || len == 1502)
fseek (ifp, 148, SEEK_CUR);
else goto next;
goto get2_256;
}
if ((tag == 0x1011 && len == 9) || tag == 0x20400200)
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = ((short) get2()) / 256.0;
if ((tag == 0x1012 || tag == 0x20400600) && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x1017 || tag == 0x20400100)
cam_mul[0] = get2() / 256.0;
if (tag == 0x1018 || tag == 0x20400100)
cam_mul[2] = get2() / 256.0;
if (tag == 0x2011 && len == 2) {
get2_256:
order = 0x4d4d;
cam_mul[0] = get2() / 256.0;
cam_mul[2] = get2() / 256.0;
}
if ((tag | 0x70) == 0x2070 && (type == 4 || type == 13))
fseek (ifp, get4()+base, SEEK_SET);
if (tag == 0x2020 && !strncmp(buf,"OLYMP",5))
parse_thumb_note (base, 257, 258);
if (tag == 0x2040)
parse_makernote (base, 0x2040);
if (tag == 0xb028) {
fseek (ifp, get4()+base, SEEK_SET);
parse_thumb_note (base, 136, 137);
}
if (tag == 0x4001 && len > 500) {
i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
fseek (ifp, i, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
for (i+=18; i <= len; i+=10) {
get2();
FORC4 sraw_mul[c ^ (c >> 1)] = get2();
if (sraw_mul[1] == 1170) break;
}
}
if (tag == 0x4021 && get4() && get4())
FORC4 cam_mul[c] = 1024;
if (tag == 0xa021)
FORC4 cam_mul[c ^ (c >> 1)] = get4();
if (tag == 0xa028)
FORC4 cam_mul[c ^ (c >> 1)] -= get4();
if (tag == 0xb001)
unique_id = get2();
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
/*
Since the TIFF DateTime string has no timezone information,
assume that the camera's clock was set to Universal Time.
*/
void CLASS get_timestamp (int reversed)
{
struct tm t;
char str[20];
int i;
str[19] = 0;
if (reversed)
for (i=19; i--; ) str[i] = fgetc(ifp);
else
fread (str, 19, 1, ifp);
memset (&t, 0, sizeof t);
if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
return;
t.tm_year -= 1900;
t.tm_mon -= 1;
t.tm_isdst = -1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
void CLASS parse_exif (int base)
{
unsigned kodak, entries, tag, type, len, save, c;
double expo;
kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 33434: tiff_ifd[tiff_nifds-1].shutter =
shutter = getreal(type); break;
case 33437: aperture = getreal(type); break;
case 34855: iso_speed = get2(); break;
case 36867:
case 36868: get_timestamp(0); break;
case 37377: if ((expo = -getreal(type)) < 128)
tiff_ifd[tiff_nifds-1].shutter =
shutter = pow (2, expo); break;
case 37378: aperture = pow (2, getreal(type)/2); break;
case 37386: focal_len = getreal(type); break;
case 37500: parse_makernote (base, 0); break;
case 40962: if (kodak) raw_width = get4(); break;
case 40963: if (kodak) raw_height = get4(); break;
case 41730:
if (get4() == 0x20002)
for (exif_cfa=c=0; c < 8; c+=2)
exif_cfa |= fgetc(ifp) * 0x01010101 << c;
}
fseek (ifp, save, SEEK_SET);
}
}
void CLASS parse_gps (int base)
{
unsigned entries, tag, type, len, save, c;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 1: case 3: case 5:
gpsdata[29+tag/2] = getc(ifp); break;
case 2: case 4: case 7:
FORC(6) gpsdata[tag/3*6+c] = get4(); break;
case 6:
FORC(2) gpsdata[18+c] = get4(); break;
case 18: case 29:
fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp);
}
fseek (ifp, save, SEEK_SET);
}
}
void CLASS romm_coeff (float romm_cam[3][3])
{
static const float rgb_romm[3][3] = /* ROMM == Kodak ProPhoto */
{ { 2.034193, -0.727420, -0.306766 },
{ -0.228811, 1.231729, -0.002922 },
{ -0.008565, -0.153273, 1.161839 } };
int i, j, k;
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
for (cmatrix[i][j] = k=0; k < 3; k++)
cmatrix[i][j] += rgb_romm[i][k] * romm_cam[k][j];
}
void CLASS parse_mos (int offset)
{
char data[40];
int skip, from, i, c, neut[4], planes=0, frot=0;
static const char *mod[] =
{ "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22",
"Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65",
"Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7",
"AFi-II 7","Aptus-II 7","","Aptus-II 6","","","Aptus-II 10","Aptus-II 5",
"","","","","Aptus-II 10R","Aptus-II 8","","Aptus-II 12","","AFi-II 12" };
float romm_cam[3][3];
fseek (ifp, offset, SEEK_SET);
while (1) {
if (get4() != 0x504b5453) break;
get4();
fread (data, 1, 40, ifp);
skip = get4();
from = ftell(ifp);
if (!strcmp(data,"JPEG_preview_data")) {
thumb_offset = from;
thumb_length = skip;
}
if (!strcmp(data,"icc_camera_profile")) {
profile_offset = from;
profile_length = skip;
}
if (!strcmp(data,"ShootObj_back_type")) {
fscanf (ifp, "%d", &i);
if ((unsigned) i < sizeof mod / sizeof (*mod))
strcpy (model, mod[i]);
}
if (!strcmp(data,"icc_camera_to_tone_matrix")) {
for (i=0; i < 9; i++)
((float *)romm_cam)[i] = int_to_float(get4());
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_color_matrix")) {
for (i=0; i < 9; i++)
fscanf (ifp, "%f", (float *)romm_cam + i);
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_number_of_planes"))
fscanf (ifp, "%d", &planes);
if (!strcmp(data,"CaptProf_raw_data_rotation"))
fscanf (ifp, "%d", &flip);
if (!strcmp(data,"CaptProf_mosaic_pattern"))
FORC4 {
fscanf (ifp, "%d", &i);
if (i == 1) frot = c ^ (c >> 1);
}
if (!strcmp(data,"ImgProf_rotation_angle")) {
fscanf (ifp, "%d", &i);
flip = i - flip;
}
if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) {
FORC4 fscanf (ifp, "%d", neut+c);
FORC3 cam_mul[c] = (float) neut[0] / neut[c+1];
}
if (!strcmp(data,"Rows_data"))
load_flags = get4();
parse_mos (from);
fseek (ifp, skip+from, SEEK_SET);
}
if (planes)
filters = (planes == 1) * 0x01010101 *
(uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3];
}
void CLASS linear_table (unsigned len)
{
int i;
if (len > 0x1000) len = 0x1000;
read_shorts (curve, len);
for (i=len; i < 0x1000; i++)
curve[i] = curve[i-1];
maximum = curve[0xfff];
}
void CLASS parse_kodak_ifd (int base)
{
unsigned entries, tag, type, len, save;
int i, c, wbi=-2, wbtemp=6500;
float mul[3]={1,1,1}, num;
static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
entries = get2();
if (entries > 1024) return;
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == 1020) wbi = getint(type);
if (tag == 1021 && len == 72) { /* WB set in software */
fseek (ifp, 40, SEEK_CUR);
FORC3 cam_mul[c] = 2048.0 / get2();
wbi = -2;
}
if (tag == 2118) wbtemp = getint(type);
if (tag == 2120 + wbi && wbi >= 0)
FORC3 cam_mul[c] = 2048.0 / getreal(type);
if (tag == 2130 + wbi)
FORC3 mul[c] = getreal(type);
if (tag == 2140 + wbi && wbi >= 0)
FORC3 {
for (num=i=0; i < 4; i++)
num += getreal(type) * pow (wbtemp/100.0, i);
cam_mul[c] = 2048 / (num * mul[c]);
}
if (tag == 2317) linear_table (len);
if (tag == 6020) iso_speed = getint(type);
if (tag == 64013) wbi = fgetc(ifp);
if ((unsigned) wbi < 7 && tag == wbtag[wbi])
FORC3 cam_mul[c] = get4();
if (tag == 64019) width = getint(type);
if (tag == 64020) height = (getint(type)+1) & -2;
fseek (ifp, save, SEEK_SET);
}
}
void CLASS parse_minolta (int base);
int CLASS parse_tiff (int base);
int CLASS parse_tiff_ifd (int base)
{
unsigned entries, tag, type, len, plen=16, save;
int ifd, use_cm=0, cfa, i, j, c, ima_len=0;
char software[64], *cbuf, *cp;
uchar cfa_pat[16], cfa_pc[] = { 0,1,2,3 }, tab[256];
double cc[4][4], cm[4][3], cam_xyz[4][3], num;
double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 };
unsigned sony_curve[] = { 0,0,0,0,0,4095 };
unsigned *buf, sony_offset=0, sony_length=0, sony_key=0;
struct jhead jh;
FILE *sfp;
if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0])
return 1;
ifd = tiff_nifds++;
for (j=0; j < 4; j++)
for (i=0; i < 4; i++)
cc[j][i] = i == j;
entries = get2();
if (entries > 512) return 1;
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
switch (tag) {
case 5: width = get2(); break;
case 6: height = get2(); break;
case 7: width += get2(); break;
case 9: if ((i = get2())) filters = i; break;
case 17: case 18:
if (type == 3 && len == 1)
cam_mul[(tag-17)*2] = get2() / 256.0;
break;
case 23:
if (type == 3) iso_speed = get2();
break;
case 28: case 29: case 30:
cblack[tag-28] = get2();
cblack[3] = cblack[1];
break;
case 36: case 37: case 38:
cam_mul[tag-36] = get2();
break;
case 39:
if (len < 50 || cam_mul[0]) break;
fseek (ifp, 12, SEEK_CUR);
FORC3 cam_mul[c] = get2();
break;
case 46:
if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break;
thumb_offset = ftell(ifp) - 2;
thumb_length = len;
break;
case 61440: /* Fuji HS10 table */
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
break;
case 2: case 256: case 61441: /* ImageWidth */
tiff_ifd[ifd].width = getint(type);
break;
case 3: case 257: case 61442: /* ImageHeight */
tiff_ifd[ifd].height = getint(type);
break;
case 258: /* BitsPerSample */
case 61443:
tiff_ifd[ifd].samples = len & 7;
if ((tiff_ifd[ifd].bps = getint(type)) > 32)
tiff_ifd[ifd].bps = 8;
if (tiff_bps < tiff_ifd[ifd].bps)
tiff_bps = tiff_ifd[ifd].bps;
break;
case 61446:
raw_height = 0;
load_flags = get4() ? 24:80;
break;
case 259: /* Compression */
tiff_ifd[ifd].comp = getint(type);
break;
case 262: /* PhotometricInterpretation */
tiff_ifd[ifd].phint = get2();
break;
case 270: /* ImageDescription */
fread (desc, 512, 1, ifp);
break;
case 271: /* Make */
fgets (make, 64, ifp);
break;
case 272: /* Model */
fgets (model, 64, ifp);
break;
case 280: /* Panasonic RW2 offset */
if (type != 4) break;
load_raw = &CLASS panasonic_load_raw;
load_flags = 0x2008;
case 273: /* StripOffset */
case 513: /* JpegIFOffset */
case 61447:
tiff_ifd[ifd].offset = get4()+base;
if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) {
fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
tiff_ifd[ifd].comp = 6;
tiff_ifd[ifd].width = jh.wide;
tiff_ifd[ifd].height = jh.high;
tiff_ifd[ifd].bps = jh.bits;
tiff_ifd[ifd].samples = jh.clrs;
if (!(jh.sraw || (jh.clrs & 1)))
tiff_ifd[ifd].width *= jh.clrs;
if ((tiff_ifd[ifd].width > 4*tiff_ifd[ifd].height) & ~jh.clrs) {
tiff_ifd[ifd].width /= 2;
tiff_ifd[ifd].height *= 2;
}
i = order;
parse_tiff (tiff_ifd[ifd].offset + 12);
order = i;
}
}
break;
case 274: /* Orientation */
tiff_ifd[ifd].flip = "50132467"[get2() & 7]-'0';
break;
case 277: /* SamplesPerPixel */
tiff_ifd[ifd].samples = getint(type) & 7;
break;
case 279: /* StripByteCounts */
case 514:
case 61448:
tiff_ifd[ifd].bytes = get4();
break;
case 61454:
FORC3 cam_mul[(4-c) % 3] = getint(type);
break;
case 305: case 11: /* Software */
fgets (software, 64, ifp);
if (!strncmp(software,"Adobe",5) ||
!strncmp(software,"dcraw",5) ||
!strncmp(software,"UFRaw",5) ||
!strncmp(software,"Bibble",6) ||
!strncmp(software,"Nikon Scan",10) ||
!strcmp (software,"Digital Photo Professional"))
is_raw = 0;
break;
case 306: /* DateTime */
get_timestamp(0);
break;
case 315: /* Artist */
fread (artist, 64, 1, ifp);
break;
case 322: /* TileWidth */
tiff_ifd[ifd].tile_width = getint(type);
break;
case 323: /* TileLength */
tiff_ifd[ifd].tile_length = getint(type);
break;
case 324: /* TileOffsets */
tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4();
if (len == 1)
tiff_ifd[ifd].tile_width = tiff_ifd[ifd].tile_length = 0;
if (len == 4) {
load_raw = &CLASS sinar_4shot_load_raw;
is_raw = 5;
}
break;
case 330: /* SubIFDs */
if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].width == 3872) {
load_raw = &CLASS sony_arw_load_raw;
data_offset = get4()+base;
ifd++; break;
}
while (len--) {
i = ftell(ifp);
fseek (ifp, get4()+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
fseek (ifp, i+4, SEEK_SET);
}
break;
case 400:
strcpy (make, "Sarnoff");
maximum = 0xfff;
break;
case 28688:
FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff;
for (i=0; i < 5; i++)
for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++)
curve[j] = curve[j-1] + (1 << i);
break;
case 29184: sony_offset = get4(); break;
case 29185: sony_length = get4(); break;
case 29217: sony_key = get4(); break;
case 29264:
parse_minolta (ftell(ifp));
raw_width = 0;
break;
case 29443:
FORC4 cam_mul[c ^ (c < 2)] = get2();
break;
case 29459:
FORC4 cam_mul[c] = get2();
i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1;
SWAP (cam_mul[i],cam_mul[i+1])
break;
case 33405: /* Model2 */
fgets (model2, 64, ifp);
break;
case 33421: /* CFARepeatPatternDim */
if (get2() == 6 && get2() == 6)
filters = 9;
break;
case 33422: /* CFAPattern */
if (filters == 9) {
FORC(36) ((char *)xtrans)[c] = fgetc(ifp) & 3;
break;
}
case 64777: /* Kodak P-series */
if ((plen=len) > 16) plen = 16;
fread (cfa_pat, 1, plen, ifp);
for (colors=cfa=i=0; i < plen && colors < 4; i++) {
colors += !(cfa & (1 << cfa_pat[i]));
cfa |= 1 << cfa_pat[i];
}
if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3); /* CMY */
if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4); /* GMCY */
goto guess_cfa_pc;
case 33424:
case 65024:
fseek (ifp, get4()+base, SEEK_SET);
parse_kodak_ifd (base);
break;
case 33434: /* ExposureTime */
tiff_ifd[ifd].shutter = shutter = getreal(type);
break;
case 33437: /* FNumber */
aperture = getreal(type);
break;
case 34306: /* Leaf white balance */
FORC4 cam_mul[c ^ 1] = 4096.0 / get2();
break;
case 34307: /* Leaf CatchLight color matrix */
fread (software, 1, 7, ifp);
if (strncmp(software,"MATRIX",6)) break;
colors = 4;
for (raw_color = i=0; i < 3; i++) {
FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]);
if (!use_camera_wb) continue;
num = 0;
FORC4 num += rgb_cam[i][c];
FORC4 rgb_cam[i][c] /= num;
}
break;
case 34310: /* Leaf metadata */
parse_mos (ftell(ifp));
case 34303:
strcpy (make, "Leaf");
break;
case 34665: /* EXIF tag */
fseek (ifp, get4()+base, SEEK_SET);
parse_exif (base);
break;
case 34853: /* GPSInfo tag */
fseek (ifp, get4()+base, SEEK_SET);
parse_gps (base);
break;
case 34675: /* InterColorProfile */
case 50831: /* AsShotICCProfile */
profile_offset = ftell(ifp);
profile_length = len;
break;
case 37122: /* CompressedBitsPerPixel */
kodak_cbpp = get4();
break;
case 37386: /* FocalLength */
focal_len = getreal(type);
break;
case 37393: /* ImageNumber */
shot_order = getint(type);
break;
case 37400: /* old Kodak KDC tag */
for (raw_color = i=0; i < 3; i++) {
getreal(type);
FORC3 rgb_cam[i][c] = getreal(type);
}
break;
case 40976:
strip_offset = get4();
switch (tiff_ifd[ifd].comp) {
case 32770: load_raw = &CLASS samsung_load_raw; break;
case 32772: load_raw = &CLASS samsung2_load_raw; break;
case 32773: load_raw = &CLASS samsung3_load_raw; break;
}
break;
case 46275: /* Imacon tags */
strcpy (make, "Imacon");
data_offset = ftell(ifp);
ima_len = len;
break;
case 46279:
if (!ima_len) break;
fseek (ifp, 38, SEEK_CUR);
case 46274:
fseek (ifp, 40, SEEK_CUR);
raw_width = get4();
raw_height = get4();
left_margin = get4() & 7;
width = raw_width - left_margin - (get4() & 7);
top_margin = get4() & 7;
height = raw_height - top_margin - (get4() & 7);
if (raw_width == 7262) {
height = 5444;
width = 7244;
left_margin = 7;
}
fseek (ifp, 52, SEEK_CUR);
FORC3 cam_mul[c] = getreal(11);
fseek (ifp, 114, SEEK_CUR);
flip = (get2() >> 7) * 90;
if (width * height * 6 == ima_len) {
if (flip % 180 == 90) SWAP(width,height);
raw_width = width;
raw_height = height;
left_margin = top_margin = filters = flip = 0;
}
sprintf (model, "Ixpress %d-Mp", height*width/1000000);
load_raw = &CLASS imacon_full_load_raw;
if (filters) {
if (left_margin & 1) filters = 0x61616161;
load_raw = &CLASS unpacked_load_raw;
}
maximum = 0xffff;
break;
case 50454: /* Sinar tag */
case 50455:
if (!(cbuf = (char *) malloc(len))) break;
fread (cbuf, 1, len, ifp);
for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n'))
if (!strncmp (++cp,"Neutral ",8))
sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2);
free (cbuf);
break;
case 50458:
if (!make[0]) strcpy (make, "Hasselblad");
break;
case 50459: /* Hasselblad tag */
i = order;
j = ftell(ifp);
c = tiff_nifds;
order = get2();
fseek (ifp, j+(get2(),get4()), SEEK_SET);
parse_tiff_ifd (j);
maximum = 0xffff;
tiff_nifds = c;
order = i;
break;
case 50706: /* DNGVersion */
FORC4 dng_version = (dng_version << 8) + fgetc(ifp);
if (!make[0]) strcpy (make, "DNG");
is_raw = 1;
break;
case 50708: /* UniqueCameraModel */
if (model[0]) break;
fgets (make, 64, ifp);
if ((cp = strchr(make,' '))) {
strcpy(model,cp+1);
*cp = 0;
}
break;
case 50710: /* CFAPlaneColor */
if (filters == 9) break;
if (len > 4) len = 4;
colors = len;
fread (cfa_pc, 1, colors, ifp);
guess_cfa_pc:
FORCC tab[cfa_pc[c]] = c;
cdesc[c] = 0;
for (i=16; i--; )
filters = filters << 2 | tab[cfa_pat[i % plen]];
filters -= !filters;
break;
case 50711: /* CFALayout */
if (get2() == 2) fuji_width = 1;
break;
case 291:
case 50712: /* LinearizationTable */
linear_table (len);
break;
case 50713: /* BlackLevelRepeatDim */
cblack[4] = get2();
cblack[5] = get2();
if (cblack[4] * cblack[5] > sizeof cblack / sizeof *cblack - 6)
cblack[4] = cblack[5] = 1;
break;
case 61450:
cblack[4] = cblack[5] = MIN(sqrt(len),64);
case 50714: /* BlackLevel */
if (!(cblack[4] * cblack[5]))
cblack[4] = cblack[5] = 1;
FORC (cblack[4] * cblack[5])
cblack[6+c] = getreal(type);
black = 0;
break;
case 50715: /* BlackLevelDeltaH */
case 50716: /* BlackLevelDeltaV */
for (num=i=0; i < (len & 0xffff); i++)
num += getreal(type);
black += num/len + 0.5;
break;
case 50717: /* WhiteLevel */
maximum = getint(type);
break;
case 50718: /* DefaultScale */
pixel_aspect = getreal(type);
pixel_aspect /= getreal(type);
break;
case 50721: /* ColorMatrix1 */
case 50722: /* ColorMatrix2 */
FORCC for (j=0; j < 3; j++)
cm[c][j] = getreal(type);
use_cm = 1;
break;
case 50723: /* CameraCalibration1 */
case 50724: /* CameraCalibration2 */
for (i=0; i < colors; i++)
FORCC cc[i][c] = getreal(type);
break;
case 50727: /* AnalogBalance */
FORCC ab[c] = getreal(type);
break;
case 50728: /* AsShotNeutral */
FORCC asn[c] = getreal(type);
break;
case 50729: /* AsShotWhiteXY */
xyz[0] = getreal(type);
xyz[1] = getreal(type);
xyz[2] = 1 - xyz[0] - xyz[1];
FORC3 xyz[c] /= d65_white[c];
break;
case 50740: /* DNGPrivateData */
if (dng_version) break;
parse_minolta (j = get4()+base);
fseek (ifp, j, SEEK_SET);
parse_tiff_ifd (base);
break;
case 50752:
read_shorts (cr2_slice, 3);
break;
case 50829: /* ActiveArea */
top_margin = getint(type);
left_margin = getint(type);
height = getint(type) - top_margin;
width = getint(type) - left_margin;
break;
case 50830: /* MaskedAreas */
for (i=0; i < len && i < 32; i++)
((int *)mask)[i] = getint(type);
black = 0;
break;
case 51009: /* OpcodeList2 */
meta_offset = ftell(ifp);
break;
case 64772: /* Kodak P-series */
if (len < 13) break;
fseek (ifp, 16, SEEK_CUR);
data_offset = get4();
fseek (ifp, 28, SEEK_CUR);
data_offset += get4();
load_raw = &CLASS packed_load_raw;
break;
case 65026:
if (type == 2) fgets (model2, 64, ifp);
}
fseek (ifp, save, SEEK_SET);
}
if (sony_length && (buf = (unsigned *) malloc(sony_length))) {
fseek (ifp, sony_offset, SEEK_SET);
fread (buf, sony_length, 1, ifp);
sony_decrypt (buf, sony_length/4, 1, sony_key);
sfp = ifp;
if ((ifp = tmpfile())) {
fwrite (buf, sony_length, 1, ifp);
fseek (ifp, 0, SEEK_SET);
parse_tiff_ifd (-sony_offset);
fclose (ifp);
}
ifp = sfp;
free (buf);
}
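/* Combine AnalogBalance, CameraCalibration and ColorMatrix (scaled by the
as-shot white point, if given) into cam_xyz and derive cmatrix;
AsShotNeutral, when present, sets cam_mul. */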
for (i=0; i < colors; i++)
FORCC cc[i][c] *= ab[i];
if (use_cm) {
FORCC for (i=0; i < 3; i++)
for (cam_xyz[c][i]=j=0; j < colors; j++)
cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i];
cam_xyz_coeff (cmatrix, cam_xyz);
}
if (asn[0]) {
cam_mul[3] = 0;
FORCC cam_mul[c] = 1 / asn[c];
}
if (!use_cm)
FORCC pre_mul[c] /= cc[c][c];
return 0;
}
int CLASS parse_tiff (int base)
{
int doff;
fseek (ifp, base, SEEK_SET);
order = get2();
if (order != 0x4949 && order != 0x4d4d) return 0;
get2();
while ((doff = get4())) {
fseek (ifp, doff+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
}
return 1;
}
void CLASS apply_tiff()
{
int max_samp=0, ties=0, os, ns, raw=-1, thm=-1, i;
struct jhead jh;
thumb_misc = 16;
if (thumb_offset) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_misc = jh.bits;
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
for (i=tiff_nifds; i--; ) {
if (tiff_ifd[i].shutter)
shutter = tiff_ifd[i].shutter;
tiff_ifd[i].shutter = shutter;
}
for (i=0; i < tiff_nifds; i++) {
if (max_samp < tiff_ifd[i].samples)
max_samp = tiff_ifd[i].samples;
if (max_samp > 3) max_samp = 3;
os = raw_width*raw_height;
ns = tiff_ifd[i].width*tiff_ifd[i].height;
if (tiff_bps) {
os *= tiff_bps;
ns *= tiff_ifd[i].bps;
}
if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
(tiff_ifd[i].width | tiff_ifd[i].height) < 0x10000 &&
ns && ((ns > os && (ties = 1)) ||
(ns == os && shot_select == ties++))) {
raw_width = tiff_ifd[i].width;
raw_height = tiff_ifd[i].height;
tiff_bps = tiff_ifd[i].bps;
tiff_compress = tiff_ifd[i].comp;
data_offset = tiff_ifd[i].offset;
tiff_flip = tiff_ifd[i].flip;
tiff_samples = tiff_ifd[i].samples;
tile_width = tiff_ifd[i].tile_width;
tile_length = tiff_ifd[i].tile_length;
shutter = tiff_ifd[i].shutter;
raw = i;
}
}
if (is_raw == 1 && ties) is_raw = ties;
if (!tile_width ) tile_width = INT_MAX;
if (!tile_length) tile_length = INT_MAX;
for (i=tiff_nifds; i--; )
if (tiff_ifd[i].flip) tiff_flip = tiff_ifd[i].flip;
if (raw >= 0 && !load_raw)
switch (tiff_compress) {
case 32767:
if (tiff_ifd[raw].bytes == raw_width*raw_height) {
tiff_bps = 12;
maximum = 4095;
load_raw = &CLASS sony_arw2_load_raw; break;
}
if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) {
raw_height += 8;
load_raw = &CLASS sony_arw_load_raw; break;
}
load_flags = 79;
case 32769:
load_flags++;
case 32770:
case 32773: goto slr;
case 0: case 1:
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*2 == raw_width*raw_height*3)
load_flags = 24;
if (!strcmp(make,"SONY") && tiff_bps < 14 &&
tiff_ifd[raw].bytes == raw_width*raw_height*2)
tiff_bps = 14;
if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
load_flags = 81;
tiff_bps = 12;
} slr:
switch (tiff_bps) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 12: if (tiff_ifd[raw].phint == 2)
load_flags = 6;
load_raw = &CLASS packed_load_raw; break;
case 14: load_raw = &CLASS packed_load_raw;
if (tiff_ifd[raw].bytes*4 == raw_width*raw_height*7) break;
load_flags = 0;
case 16: load_raw = &CLASS unpacked_load_raw;
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*7 > raw_width*raw_height)
load_raw = &CLASS olympus_load_raw;
}
if (!strcmp(make,"FUJIFILM") && tiff_ifd[raw].bytes*8 < raw_width*raw_height*tiff_bps)
parse_fuji_compressed_header();
break;
case 6: case 7: case 99:
load_raw = &CLASS lossless_jpeg_load_raw; break;
case 262:
load_raw = &CLASS kodak_262_load_raw; break;
case 34713:
if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) {
load_raw = &CLASS packed_load_raw;
load_flags = 1;
} else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes*2) {
load_raw = &CLASS packed_load_raw;
if (model[0] == 'N') load_flags = 80;
} else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes) {
load_raw = &CLASS nikon_yuv_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
memset (cblack, 0, sizeof cblack);
filters = 0;
} else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
order = 0x4d4d;
} else
load_raw = &CLASS nikon_load_raw; break;
case 65535:
load_raw = &CLASS pentax_load_raw; break;
case 65000:
switch (tiff_ifd[raw].phint) {
case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break;
case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break;
case 32803: load_raw = &CLASS kodak_65000_load_raw;
}
case 32867: case 34892: break;
default: is_raw = 0;
}
if (!dng_version)
if ( (tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 &&
(tiff_compress & -16) != 32768)
|| (tiff_bps == 8 && strncmp(make,"Phase",5) &&
!strcasestr(make,"Kodak") && !strstr(model2,"DEBUG RAW")))
is_raw = 0;
for (i=0; i < tiff_nifds; i++)
if (i != raw && tiff_ifd[i].samples == max_samp &&
tiff_ifd[i].width * tiff_ifd[i].height / (SQR(tiff_ifd[i].bps)+1) >
thumb_width * thumb_height / (SQR(thumb_misc)+1)
&& tiff_ifd[i].comp != 34892) {
thumb_width = tiff_ifd[i].width;
thumb_height = tiff_ifd[i].height;
thumb_offset = tiff_ifd[i].offset;
thumb_length = tiff_ifd[i].bytes;
thumb_misc = tiff_ifd[i].bps;
thm = i;
}
if (thm >= 0) {
thumb_misc |= tiff_ifd[thm].samples << 5;
switch (tiff_ifd[thm].comp) {
case 0:
write_thumb = &CLASS layer_thumb;
break;
case 1:
if (tiff_ifd[thm].bps <= 8)
write_thumb = &CLASS ppm_thumb;
else if (!strcmp(make,"Imacon"))
write_thumb = &CLASS ppm16_thumb;
else
thumb_load_raw = &CLASS kodak_thumb_load_raw;
break;
case 65000:
thumb_load_raw = tiff_ifd[thm].phint == 6 ?
&CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
}
}
}
void CLASS parse_minolta (int base)
{
int save, tag, len, offset, high=0, wide=0, i, c;
short sorder=order;
fseek (ifp, base, SEEK_SET);
if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return;
order = fgetc(ifp) * 0x101;
offset = base + get4() + 8;
while ((save=ftell(ifp)) < offset) {
for (tag=i=0; i < 4; i++)
tag = tag << 8 | fgetc(ifp);
len = get4();
switch (tag) {
case 0x505244: /* PRD */
fseek (ifp, 8, SEEK_CUR);
high = get2();
wide = get2();
break;
case 0x574247: /* WBG */
get4();
i = strcmp(model,"DiMAGE A200") ? 0:3;
FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
break;
case 0x545457: /* TTW */
parse_tiff (ftell(ifp));
data_offset = offset;
}
fseek (ifp, save+len+8, SEEK_SET);
}
raw_height = high;
raw_width = wide;
order = sorder;
}
/*
Many cameras have a "debug mode" that writes JPEG and raw
at the same time. The raw file has no header, so try to
open the matching JPEG file and read its metadata.
*/
void CLASS parse_external_jpeg()
{
const char *file, *ext;
char *jname, *jfile, *jext;
FILE *save=ifp;
ext = strrchr (ifname, '.');
file = strrchr (ifname, '/');
if (!file) file = strrchr (ifname, '\\');
if (!file) file = ifname-1;
file++;
if (!ext || strlen(ext) != 4 || ext-file != 8) return;
jname = (char *) malloc (strlen(ifname) + 1);
merror (jname, "parse_external_jpeg()");
strcpy (jname, ifname);
jfile = file - ifname + jname;
jext = ext - ifname + jname;
if (strcasecmp (ext, ".jpg")) {
strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
if (isdigit(*file)) {
memcpy (jfile, file+4, 4);
memcpy (jfile+4, file, 4);
}
} else
while (isdigit(*--jext)) {
if (*jext != '9') {
(*jext)++;
break;
}
*jext = '0';
}
if (strcmp (jname, ifname)) {
if ((ifp = fopen (jname, "rb"))) {
if (verbose)
fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
fclose (ifp);
}
}
if (!timestamp)
fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
free (jname);
ifp = save;
}
/*
CIFF block 0x1030 contains an 8x8 white sample.
Load this into white[][] for use in scale_colors().
*/
void CLASS ciff_block_1030()
{
static const ushort key[] = { 0x410, 0x45f3 };
int i, bpp, row, col, vbits=0;
unsigned long bitbuf=0;
if ((get2(),get4()) != 0x80008 || !get4()) return;
bpp = get2();
if (bpp != 10 && bpp != 12) return;
for (i=row=0; row < 8; row++)
for (col=0; col < 8; col++) {
if (vbits < bpp) {
bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
vbits += 16;
}
white[row][col] = bitbuf >> (vbits -= bpp) & ~(-1 << bpp);
}
}
/*
Parse a CIFF file, better known as Canon CRW format.
*/
void CLASS parse_ciff (int offset, int length, int depth)
{
int tboff, nrecs, c, type, len, save, wbi=-1;
ushort key[] = { 0x410, 0x45f3 };
fseek (ifp, offset+length-4, SEEK_SET);
tboff = get4() + offset;
fseek (ifp, tboff, SEEK_SET);
nrecs = get2();
if ((nrecs | depth) > 127) return;
while (nrecs--) {
type = get2();
len = get4();
save = ftell(ifp) + 4;
fseek (ifp, offset+get4(), SEEK_SET);
if ((((type >> 8) + 8) | 8) == 0x38)
parse_ciff (ftell(ifp), len, depth+1); /* Parse a sub-table */
if (type == 0x0810)
fread (artist, 64, 1, ifp);
if (type == 0x080a) {
fread (make, 64, 1, ifp);
fseek (ifp, strlen(make) - 63, SEEK_CUR);
fread (model, 64, 1, ifp);
}
if (type == 0x1810) {
width = get4();
height = get4();
pixel_aspect = int_to_float(get4());
flip = get4();
}
if (type == 0x1835) /* Get the decoder table */
tiff_compress = get4();
if (type == 0x2007) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (type == 0x1818) {
shutter = pow (2, -int_to_float((get4(),get4())));
aperture = pow (2, int_to_float(get4())/2);
}
if (type == 0x102a) {
iso_speed = pow (2, (get4(),get2())/32.0 - 4) * 50;
aperture = pow (2, (get2(),(short)get2())/64.0);
shutter = pow (2,-((short)get2())/32.0);
wbi = (get2(),get2());
if (wbi > 17) wbi = 0;
fseek (ifp, 32, SEEK_CUR);
if (shutter > 1e6) shutter = get2()/10.0;
}
if (type == 0x102c) {
if (get2() > 512) { /* Pro90, G1 */
fseek (ifp, 118, SEEK_CUR);
FORC4 cam_mul[c ^ 2] = get2();
} else { /* G2, S30, S40 */
fseek (ifp, 98, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2();
}
}
if (type == 0x0032) {
if (len == 768) { /* EOS D30 */
fseek (ifp, 72, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = 1024.0 / get2();
if (!wbi) cam_mul[0] = -1; /* use my auto white balance */
} else if (!cam_mul[0]) {
if (get2() == key[0]) /* Pro1, G6, S60, S70 */
c = (strstr(model,"Pro1") ?
"012346000000000000":"01345:000000006008")[wbi]-'0'+ 2;
else { /* G3, G5, S45, S50 */
c = "023457000000006000"[wbi]-'0';
key[0] = key[1] = 0;
}
fseek (ifp, 78 + c*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1];
if (!wbi) cam_mul[0] = -1;
}
}
if (type == 0x10a9) { /* D60, 10D, 300D, and clones */
if (len > 66) wbi = "0134567028"[wbi]-'0';
fseek (ifp, 2 + wbi*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
}
if (type == 0x1030 && (0x18040 >> wbi & 1))
ciff_block_1030(); /* all that don't have 0x10a9 */
if (type == 0x1031) {
raw_width = (get2(),get2());
raw_height = get2();
}
if (type == 0x5029) {
focal_len = len >> 16;
if ((len & 0xffff) == 2) focal_len /= 32;
}
if (type == 0x5813) flash_used = int_to_float(len);
if (type == 0x5814) canon_ev = int_to_float(len);
if (type == 0x5817) shot_order = len;
if (type == 0x5834) unique_id = len;
if (type == 0x580e) timestamp = len;
if (type == 0x180e) timestamp = get4();
#ifdef LOCALTIME
if ((type | 0x4000) == 0x580e)
timestamp = mktime (gmtime (×tamp));
#endif
fseek (ifp, save, SEEK_SET);
}
}
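/*
Parse the plain-text header of a Rollei d530flex file: "TAG=value"
lines terminated by an "EOHD" marker.
*/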
void CLASS parse_rollei()
{
char line[128], *val;
struct tm t;
fseek (ifp, 0, SEEK_SET);
memset (&t, 0, sizeof t);
do {
fgets (line, 128, ifp);
if ((val = strchr(line,'=')))
*val++ = 0;
else
val = line + strlen(line);
if (!strcmp(line,"DAT"))
sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year);
if (!strcmp(line,"TIM"))
sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec);
if (!strcmp(line,"HDR"))
thumb_offset = atoi(val);
if (!strcmp(line,"X "))
raw_width = atoi(val);
if (!strcmp(line,"Y "))
raw_height = atoi(val);
if (!strcmp(line,"TX "))
thumb_width = atoi(val);
if (!strcmp(line,"TY "))
thumb_height = atoi(val);
} while (strncmp(line,"EOHD",4));
data_offset = thumb_offset + thumb_width * thumb_height * 2;
t.tm_year -= 1900;
t.tm_mon -= 1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
strcpy (make, "Rollei");
strcpy (model,"d530flex");
write_thumb = &CLASS rollei_thumb;
}
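/*
Parse a Sinar "IA" file: a little-endian table of named blocks
(META, THUMB, RAW0) locates the metadata, thumbnail, and raw data.
*/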
void CLASS parse_sinar_ia()
{
int entries, off;
char str[8], *cp;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
entries = get4();
fseek (ifp, get4(), SEEK_SET);
while (entries--) {
off = get4(); get4();
fread (str, 8, 1, ifp);
if (!strcmp(str,"META")) meta_offset = off;
if (!strcmp(str,"THUMB")) thumb_offset = off;
if (!strcmp(str,"RAW0")) data_offset = off;
}
fseek (ifp, meta_offset+20, SEEK_SET);
fread (make, 64, 1, ifp);
make[63] = 0;
if ((cp = strchr(make,' '))) {
strcpy (model, cp+1);
*cp = 0;
}
raw_width = get2();
raw_height = get2();
load_raw = &CLASS unpacked_load_raw;
thumb_width = (get4(),get2());
thumb_height = get2();
write_thumb = &CLASS ppm_thumb;
maximum = 0x3fff;
}
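/*
Parse the tag directory of a Phase One raw file. The header starts
with a byte-order word and the signature "Raw"; the tags fill in the
geometry, black-level, and color fields of the ph1 struct.
*/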
void CLASS parse_phase_one (int base)
{
unsigned entries, tag, type, len, data, save, i, c;
float romm_cam[3][3];
char *cp;
memset (&ph1, 0, sizeof ph1);
fseek (ifp, base, SEEK_SET);
order = get4() & 0xffff;
if (get4() >> 8 != 0x526177) return; /* "Raw" */
fseek (ifp, get4()+base, SEEK_SET);
entries = get4();
get4();
while (entries--) {
tag = get4();
type = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, base+data, SEEK_SET);
switch (tag) {
case 0x100: flip = "0653"[data & 3]-'0'; break;
case 0x106:
for (i=0; i < 9; i++)
((float *)romm_cam)[i] = getreal(11);
romm_coeff (romm_cam);
break;
case 0x107:
FORC3 cam_mul[c] = getreal(11);
break;
case 0x108: raw_width = data; break;
case 0x109: raw_height = data; break;
case 0x10a: left_margin = data; break;
case 0x10b: top_margin = data; break;
case 0x10c: width = data; break;
case 0x10d: height = data; break;
case 0x10e: ph1.format = data; break;
case 0x10f: data_offset = data+base; break;
case 0x110: meta_offset = data+base;
meta_length = len; break;
case 0x112: ph1.key_off = save - 4; break;
case 0x210: ph1.tag_210 = int_to_float(data); break;
case 0x21a: ph1.tag_21a = data; break;
case 0x21c: strip_offset = data+base; break;
case 0x21d: ph1.black = data; break;
case 0x222: ph1.split_col = data; break;
case 0x223: ph1.black_col = data+base; break;
case 0x224: ph1.split_row = data; break;
case 0x225: ph1.black_row = data+base; break;
case 0x301:
model[63] = 0;
fread (model, 1, 63, ifp);
if ((cp = strstr(model," camera"))) *cp = 0;
}
fseek (ifp, save, SEEK_SET);
}
load_raw = ph1.format < 3 ?
&CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
maximum = 0xffff;
strcpy (make, "Phase One");
if (model[0]) return;
switch (raw_height) {
case 2060: strcpy (model,"LightPhase"); break;
case 2682: strcpy (model,"H 10"); break;
case 4128: strcpy (model,"H 20"); break;
case 5488: strcpy (model,"H 25"); break;
}
}
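/*
Parse a Fujifilm table of 2-byte tag / 2-byte length records: raw and
output dimensions, sensor layout flags, the X-Trans CFA pattern, and
the camera white balance.
*/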
void CLASS parse_fuji (int offset)
{
unsigned entries, tag, len, save, c;
fseek (ifp, offset, SEEK_SET);
entries = get4();
if (entries > 255) return;
while (entries--) {
tag = get2();
len = get2();
save = ftell(ifp);
if (tag == 0x100) {
raw_height = get2();
raw_width = get2();
} else if (tag == 0x121) {
height = get2();
if ((width = get2()) == 4284) width += 3;
} else if (tag == 0x130) {
fuji_layout = fgetc(ifp) >> 7;
fuji_width = !(fgetc(ifp) & 8);
} else if (tag == 0x131) {
filters = 9;
FORC(36) xtrans_abs[0][35-c] = fgetc(ifp) & 3;
} else if (tag == 0x2ff0) {
FORC4 cam_mul[c ^ 1] = get2();
} else if (tag == 0xc000 && len > 20000) {
c = order;
order = 0x4949;
while ((tag = get4()) > raw_width);
width = tag;
height = get4();
order = c;
}
fseek (ifp, save+len, SEEK_SET);
}
height <<= fuji_layout;
width >>= fuji_layout;
}
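/*
Walk the markers of a JPEG stream: take the raw dimensions from the
frame header and look for embedded CIFF ("HEAP") or TIFF metadata.
*/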
int CLASS parse_jpeg (int offset)
{
int len, save, hlen, mark;
fseek (ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) {
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9) {
fgetc(ifp);
raw_height = get2();
raw_width = get2();
}
order = get2();
hlen = get4();
if (get4() == 0x48454150) /* "HEAP" */
parse_ciff (save+hlen, len-hlen, 0);
if (parse_tiff (save+6)) apply_tiff();
fseek (ifp, save+len, SEEK_SET);
}
return 1;
}
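/*
Recursively parse RIFF chunks, looking only for a timestamp in the
"nctg" or "IDIT" blocks.
*/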
void CLASS parse_riff()
{
unsigned i, size, end;
char tag[4], date[64], month[64];
static const char mon[12][4] =
{ "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
struct tm t;
order = 0x4949;
fread (tag, 4, 1, ifp);
size = get4();
end = ftell(ifp) + size;
if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
get4();
while (ftell(ifp)+7 < end && !feof(ifp))
parse_riff();
} else if (!memcmp(tag,"nctg",4)) {
while (ftell(ifp)+7 < end) {
i = get2();
size = get2();
if ((i+1) >> 1 == 10 && size == 20)
get_timestamp(0);
else fseek (ifp, size, SEEK_CUR);
}
} else if (!memcmp(tag,"IDIT",4) && size < 64) {
fread (date, 64, 1, ifp);
date[size] = 0;
memset (&t, 0, sizeof t);
if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
&t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
for (i=0; i < 12 && strcasecmp(mon[i],month); i++);
t.tm_mon = i;
t.tm_year -= 1900;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
} else
fseek (ifp, size, SEEK_CUR);
}
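/*
Parse the ISO-media box tree of a Canon CR3 ("crx") file: CMT1/CMT2
boxes carry TIFF/EXIF metadata, while the tkhd/stsz/co64 boxes of
each track locate the thumbnail and the raw image.
*/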
void CLASS parse_crx (int end)
{
unsigned i, save, size, tag, base;
static int index=0, wide, high, off, len;
order = 0x4d4d;
while (ftell(ifp)+7 < end) {
save = ftell(ifp);
if ((size = get4()) < 8) break;
switch (tag = get4()) {
case 0x6d6f6f76: /* moov */
case 0x7472616b: /* trak */
case 0x6d646961: /* mdia */
case 0x6d696e66: /* minf */
case 0x7374626c: /* stbl */
parse_crx (save+size);
break;
case 0x75756964: /* uuid */
switch (i=get4()) {
case 0xeaf42b5e: fseek (ifp, 8, SEEK_CUR);
case 0x85c0b687: fseek (ifp, 12, SEEK_CUR);
parse_crx (save+size);
}
break;
case 0x434d5431: /* CMT1 */
case 0x434d5432: /* CMT2 */
base = ftell(ifp);
order = get2();
fseek (ifp, 6, SEEK_CUR);
tag & 1 ? parse_tiff_ifd (base) : parse_exif (base);
order = 0x4d4d;
break;
case 0x746b6864: /* tkhd */
fseek (ifp, 12, SEEK_CUR);
index = get4();
fseek (ifp, 58, SEEK_CUR);
wide = get4();
high = get4();
break;
case 0x7374737a: /* stsz */
len = (get4(),get4());
break;
case 0x636f3634: /* co64 */
fseek (ifp, 12, SEEK_CUR);
off = get4();
switch (index) {
case 1: /* 1 = full size, 2 = 27% size */
thumb_width = wide;
thumb_height = high;
thumb_length = len;
thumb_offset = off;
break;
case 3:
raw_width = wide;
raw_height = high;
data_offset = off;
load_raw = &CLASS canon_crx_load_raw;
}
break;
case 0x50525657: /* PRVW */
fseek (ifp, 6, SEEK_CUR);
}
fseek (ifp, save+size, SEEK_SET);
}
}
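/*
Parse a QuickTime container, descending into moov/udta/CNTH boxes and
handing any embedded "CNDA" JPEG to parse_jpeg().
*/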
void CLASS parse_qt (int end)
{
unsigned save, size;
char tag[4];
order = 0x4d4d;
while (ftell(ifp)+7 < end) {
save = ftell(ifp);
if ((size = get4()) < 8) return;
fread (tag, 4, 1, ifp);
if (!memcmp(tag,"moov",4) ||
!memcmp(tag,"udta",4) ||
!memcmp(tag,"CNTH",4))
parse_qt (save+size);
if (!memcmp(tag,"CNDA",4))
parse_jpeg (ftell(ifp));
fseek (ifp, save+size, SEEK_SET);
}
}
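/*
Parse the header of a SMaL raw file and pick the loader for format
version 6 or 9.
*/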
void CLASS parse_smal (int offset, int fsize)
{
int ver;
fseek (ifp, offset+2, SEEK_SET);
order = 0x4949;
ver = fgetc(ifp);
if (ver == 6)
fseek (ifp, 5, SEEK_CUR);
if (get4() != fsize) return;
if (ver > 6) data_offset = get4();
raw_height = height = get2();
raw_width = width = get2();
strcpy (make, "SMaL");
sprintf (model, "v%d %dx%d", ver, width, height);
if (ver == 6) load_raw = &CLASS smal_v6_load_raw;
if (ver == 9) load_raw = &CLASS smal_v9_load_raw;
}
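/*
Parse the header of a CINE high-speed video file: frame geometry, bit
depth, CFA pattern, orientation, and the offset of the frame selected
by shot_select.
*/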
void CLASS parse_cine()
{
unsigned off_head, off_setup, off_image, i;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
is_raw = get2() == 2;
fseek (ifp, 14, SEEK_CUR);
is_raw *= get4();
off_head = get4();
off_setup = get4();
off_image = get4();
timestamp = get4();
if ((i = get4())) timestamp = i;
fseek (ifp, off_head+4, SEEK_SET);
raw_width = get4();
raw_height = get4();
switch (get2(),get2()) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 16: load_raw = &CLASS unpacked_load_raw;
}
fseek (ifp, off_setup+792, SEEK_SET);
strcpy (make, "CINE");
sprintf (model, "%d", get4());
fseek (ifp, 12, SEEK_CUR);
switch ((i=get4()) & 0xffffff) {
case 3: filters = 0x94949494; break;
case 4: filters = 0x49494949; break;
default: is_raw = 0;
}
fseek (ifp, 72, SEEK_CUR);
switch ((get4()+3600) % 360) {
case 270: flip = 4; break;
case 180: flip = 1; break;
case 90: flip = 7; break;
case 0: flip = 2;
}
cam_mul[0] = getreal(11);
cam_mul[2] = getreal(11);
maximum = ~(-1 << get4());
fseek (ifp, 668, SEEK_CUR);
shutter = get4()/1000000000.0;
fseek (ifp, off_image, SEEK_SET);
if (shot_select < is_raw)
fseek (ifp, shot_select*8, SEEK_CUR);
data_offset = (INT64) get4() + 8;
data_offset += (INT64) get4() << 32;
}
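/*
Parse a REDCODE R3D file: frame size comes from the head, and frame
offsets come from the index at the tail, or from scanning for "REDV"
blocks when the tail is missing.
*/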
void CLASS parse_redcine()
{
unsigned i, len, rdvo;
order = 0x4d4d;
is_raw = 0;
fseek (ifp, 52, SEEK_SET);
width = get4();
height = get4();
fseek (ifp, 0, SEEK_END);
fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR);
if (get4() != i || get4() != 0x52454f42) {
fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname);
fseek (ifp, 0, SEEK_SET);
while ((len = get4()) != EOF) {
if (get4() == 0x52454456)
if (is_raw++ == shot_select)
data_offset = ftello(ifp) - 8;
fseek (ifp, len-8, SEEK_CUR);
}
} else {
rdvo = get4();
fseek (ifp, 12, SEEK_CUR);
is_raw = get4();
fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET);
data_offset = get4();
}
}
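/*
Read up to len-1 16-bit characters (presumably UTF-16) starting at
"offset" into a C string, keeping only the low byte of each.
*/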
char * CLASS foveon_gets (int offset, char *str, int len)
{
int i;
fseek (ifp, offset, SEEK_SET);
for (i=0; i < len-1; i++)
if ((str[i] = get2()) == 0) break;
str[i] = 0;
return str;
}
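/*
Parse a Sigma X3F (Foveon) file: the section directory at the end of
the file points to the IMAG/IMA2 image blocks, the CAMF calibration
block, and the PROP list of name/value properties.
*/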
void CLASS parse_foveon()
{
int entries, img=0, off, len, tag, save, i, wide, high, pent, poff[256][2];
char name[64], value[64];
order = 0x4949; /* Little-endian */
fseek (ifp, 36, SEEK_SET);
flip = get4();
fseek (ifp, -4, SEEK_END);
fseek (ifp, get4(), SEEK_SET);
if (get4() != 0x64434553) return; /* SECd */
entries = (get4(),get4());
while (entries--) {
off = get4();
len = get4();
tag = get4();
save = ftell(ifp);
fseek (ifp, off, SEEK_SET);
if (get4() != (0x20434553 | (tag << 24))) return;
switch (tag) {
case 0x47414d49: /* IMAG */
case 0x32414d49: /* IMA2 */
fseek (ifp, 8, SEEK_CUR);
pent = get4();
wide = get4();
high = get4();
if (wide > raw_width && high > raw_height) {
switch (pent) {
case 5: load_flags = 1;
// case 6: load_raw = &CLASS foveon_sd_load_raw; break;
// case 30: load_raw = &CLASS foveon_dp_load_raw; break;
default: load_raw = 0;
}
raw_width = wide;
raw_height = high;
data_offset = off+28;
is_foveon = 1;
}
fseek (ifp, off+28, SEEK_SET);
if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8
&& thumb_length < len-28) {
thumb_offset = off+28;
thumb_length = len-28;
write_thumb = &CLASS jpeg_thumb;
}
if (++img == 2 && !thumb_length) {
thumb_offset = off+24;
thumb_width = wide;
thumb_height = high;
// write_thumb = &CLASS foveon_thumb;
}
break;
case 0x464d4143: /* CAMF */
meta_offset = off+8;
meta_length = len-28;
break;
case 0x504f5250: /* PROP */
pent = (get4(),get4());
fseek (ifp, 12, SEEK_CUR);
off += pent*8 + 24;
if ((unsigned) pent > 256) pent=256;
for (i=0; i < pent*2; i++)
((int *)poff)[i] = off + get4()*2;
for (i=0; i < pent; i++) {
foveon_gets (poff[i][0], name, 64);
foveon_gets (poff[i][1], value, 64);
if (!strcmp (name, "ISO"))
iso_speed = atoi(value);
if (!strcmp (name, "CAMMANUF"))
strcpy (make, value);
if (!strcmp (name, "CAMMODEL"))
strcpy (model, value);
if (!strcmp (name, "WB_DESC"))
strcpy (model2, value);
if (!strcmp (name, "TIME"))
timestamp = atoi(value);
if (!strcmp (name, "EXPTIME"))
shutter = atoi(value) / 1000000.0;
if (!strcmp (name, "APERTURE"))
aperture = atof(value);
if (!strcmp (name, "FLENGTH"))
focal_len = atof(value);
}
#ifdef LOCALTIME
timestamp = mktime (gmtime (×tamp));
#endif
}
fseek (ifp, save, SEEK_SET);
}
}
/*
All matrices are from Adobe DNG Converter unless otherwise noted.
*/
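/*
Each entry below gives a make/model prefix, a sensor black level and
saturation maximum (zero means keep the defaults), and a color matrix,
scaled by 10,000, mapping XYZ to the camera's color space. "DJC"
marks matrices measured by the author rather than taken from Adobe.
(Field meanings inferred from how adobe_coeff() applies this table.)
*/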
void CLASS adobe_coeff (const char *make, const char *model)
{
static const struct {
const char *prefix;
short black, maximum, trans[12];
} table[] = {
{ "AgfaPhoto DC-833m", 0, 0, /* DJC */
{ 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } },
{ "Apple QuickTake", 0, 0, /* DJC */
{ 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } },
{ "Canon EOS D2000", 0, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Canon EOS D6000", 0, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Canon EOS D30", 0, 0,
{ 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } },
{ "Canon EOS D60", 0, 0xfa0,
{ 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } },
{ "Canon EOS 5DS", 0, 0x3c96,
{ 6250,-711,-808,-5153,12794,2636,-1249,2198,5610 } },
{ "Canon EOS 5D Mark IV", 0, 0,
{ 6446,-366,-864,-4436,12204,2513,-952,2496,6348 } },
{ "Canon EOS 5D Mark III", 0, 0x3c80,
{ 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } },
{ "Canon EOS 5D Mark II", 0, 0x3cf0,
{ 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } },
{ "Canon EOS 5D", 0, 0xe6c,
{ 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } },
{ "Canon EOS 6D Mark II", 0, 0,
{ 6875,-970,-932,-4691,12459,2501,-874,1953,5809 } },
{ "Canon EOS 6D", 0, 0x3c82,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 7D Mark II", 0, 0x3510,
{ 7268,-1082,-969,-4186,11839,2663,-825,2029,5839 } },
{ "Canon EOS 7D", 0, 0x3510,
{ 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } },
{ "Canon EOS 10D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 20Da", 0, 0,
{ 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } },
{ "Canon EOS 20D", 0, 0xfff,
{ 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } },
{ "Canon EOS 30D", 0, 0,
{ 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } },
{ "Canon EOS 40D", 0, 0x3f60,
{ 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } },
{ "Canon EOS 50D", 0, 0x3d93,
{ 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } },
{ "Canon EOS 60D", 0, 0x2ff7,
{ 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } },
{ "Canon EOS 70D", 0, 0x3bc7,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 77D", 0, 0,
{ 7377,-742,-998,-4235,11981,2549,-673,1918,5538 } },
{ "Canon EOS 80D", 0, 0,
{ 7457,-671,-937,-4849,12495,2643,-1213,2354,5492 } },
{ "Canon EOS 100D", 0, 0x350f,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 200D", 0, 0,
{ 7377,-742,-998,-4235,11981,2549,-673,1918,5538 } },
{ "Canon EOS 300D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 350D", 0, 0xfff,
{ 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } },
{ "Canon EOS 400D", 0, 0xe8e,
{ 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } },
{ "Canon EOS 450D", 0, 0x390d,
{ 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } },
{ "Canon EOS 500D", 0, 0x3479,
{ 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } },
{ "Canon EOS 550D", 0, 0x3dd7,
{ 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } },
{ "Canon EOS 600D", 0, 0x3510,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 650D", 0, 0x354d,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 700D", 0, 0x3c00,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 750D", 0, 0x368e,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS 760D", 0, 0x350f,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS 800D", 0, 0,
{ 6970,-512,-968,-4425,12161,2553,-739,1982,5601 } },
{ "Canon EOS 1000D", 0, 0xe43,
{ 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } },
{ "Canon EOS 1100D", 0, 0x3510,
{ 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } },
{ "Canon EOS 1200D", 0, 0x37c2,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 1300D", 0, 0x3510,
{ 6939,-1016,-866,-4428,12473,2177,-1175,2178,6162 } },
{ "Canon EOS 1500D", 0, 0,
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon EOS 3000D", 0, 0,
{ 6939,-1016,-866,-4428,12473,2177,-1175,2178,6162 } },
{ "Canon EOS M6", 0, 0,
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon EOS M5", 0, 0, /* also M50 */
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon EOS M3", 0, 0,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS M100", 0, 0,
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon EOS M10", 0, 0,
{ 6400,-480,-888,-5294,13416,2047,-1296,2203,6137 } },
{ "Canon EOS M", 0, 0,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS-1Ds Mark III", 0, 0x3bb0,
{ 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } },
{ "Canon EOS-1Ds Mark II", 0, 0xe80,
{ 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } },
{ "Canon EOS-1D Mark IV", 0, 0x3bb0,
{ 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } },
{ "Canon EOS-1D Mark III", 0, 0x3bb0,
{ 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } },
{ "Canon EOS-1D Mark II N", 0, 0xe80,
{ 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } },
{ "Canon EOS-1D Mark II", 0, 0xe80,
{ 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } },
{ "Canon EOS-1DS", 0, 0xe20,
{ 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } },
{ "Canon EOS-1D C", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D X Mark II", 0, 0,
{ 7596,-978,-967,-4808,12571,2503,-1398,2567,5752 } },
{ "Canon EOS-1D X", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D", 0, 0xe20,
{ 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } },
{ "Canon EOS C500", 853, 0, /* DJC */
{ 17851,-10604,922,-7425,16662,763,-3660,3636,22278 } },
{ "Canon PowerShot A530", 0, 0,
{ 0 } }, /* don't want the A5 matrix */
{ "Canon PowerShot A50", 0, 0,
{ -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } },
{ "Canon PowerShot A5", 0, 0,
{ -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } },
{ "Canon PowerShot G10", 0, 0,
{ 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } },
{ "Canon PowerShot G11", 0, 0,
{ 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } },
{ "Canon PowerShot G12", 0, 0,
{ 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } },
{ "Canon PowerShot G15", 0, 0,
{ 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } },
{ "Canon PowerShot G16", 0, 0,
{ 8020,-2687,-682,-3704,11879,2052,-965,1921,5556 } },
{ "Canon PowerShot G1 X Mark III", 0, 0,
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon PowerShot G1 X", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1", 0, 0,
{ -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } },
{ "Canon PowerShot G2", 0, 0,
{ 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } },
{ "Canon PowerShot G3 X", 0, 0,
{ 9701,-3857,-921,-3149,11537,1817,-786,1817,5147 } },
{ "Canon PowerShot G3", 0, 0,
{ 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } },
{ "Canon PowerShot G5 X", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G5", 0, 0,
{ 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } },
{ "Canon PowerShot G6", 0, 0,
{ 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } },
{ "Canon PowerShot G7 X", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9 X Mark II", 0, 0,
{ 10056,-4131,-944,-2576,11143,1625,-238,1294,5179 } },
{ "Canon PowerShot G9 X", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9", 0, 0,
{ 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } },
{ "Canon PowerShot Pro1", 0, 0,
{ 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } },
{ "Canon PowerShot Pro70", 34, 0,
{ -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } },
{ "Canon PowerShot Pro90", 0, 0,
{ -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } },
{ "Canon PowerShot S30", 0, 0,
{ 10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } },
{ "Canon PowerShot S40", 0, 0,
{ 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } },
{ "Canon PowerShot S45", 0, 0,
{ 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } },
{ "Canon PowerShot S50", 0, 0,
{ 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } },
{ "Canon PowerShot S60", 0, 0,
{ 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } },
{ "Canon PowerShot S70", 0, 0,
{ 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } },
{ "Canon PowerShot S90", 0, 0,
{ 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } },
{ "Canon PowerShot S95", 0, 0,
{ 13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } },
{ "Canon PowerShot S100", 0, 0,
{ 7968,-2565,-636,-2873,10697,2513,180,667,4211 } },
{ "Canon PowerShot S110", 0, 0,
{ 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } },
{ "Canon PowerShot S120", 0, 0,
{ 6961,-1685,-695,-4625,12945,1836,-1114,2152,5518 } },
{ "Canon PowerShot SX1 IS", 0, 0,
{ 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } },
{ "Canon PowerShot SX50 HS", 0, 0,
{ 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } },
{ "Canon PowerShot SX60 HS", 0, 0,
{ 13161,-5451,-1344,-1989,10654,1531,-47,1271,4955 } },
{ "Canon PowerShot A3300", 0, 0, /* DJC */
{ 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } },
{ "Canon PowerShot A470", 0, 0, /* DJC */
{ 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } },
{ "Canon PowerShot A610", 0, 0, /* DJC */
{ 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } },
{ "Canon PowerShot A620", 0, 0, /* DJC */
{ 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } },
{ "Canon PowerShot A630", 0, 0, /* DJC */
{ 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } },
{ "Canon PowerShot A640", 0, 0, /* DJC */
{ 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } },
{ "Canon PowerShot A650", 0, 0, /* DJC */
{ 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } },
{ "Canon PowerShot A720", 0, 0, /* DJC */
{ 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } },
{ "Canon PowerShot S3 IS", 0, 0, /* DJC */
{ 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } },
{ "Canon PowerShot SX110 IS", 0, 0, /* DJC */
{ 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } },
{ "Canon PowerShot SX220", 0, 0, /* DJC */
{ 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } },
{ "Canon IXUS 160", 0, 0, /* DJC */
{ 11657,-3781,-1136,-3544,11262,2283,-160,1219,4700 } },
{ "Casio EX-S20", 0, 0, /* DJC */
{ 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } },
{ "Casio EX-Z750", 0, 0, /* DJC */
{ 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } },
{ "Casio EX-Z10", 128, 0xfff, /* DJC */
{ 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } },
{ "CINE 650", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE 660", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE", 0, 0,
{ 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } },
{ "Contax N Digital", 0, 0xf1e,
{ 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } },
{ "DXO ONE", 0, 0,
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Epson R-D1", 0, 0,
{ 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } },
{ "Fujifilm E550", 0, 0,
{ 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } },
{ "Fujifilm E900", 0, 0,
{ 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } },
{ "Fujifilm F5", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F6", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F77", 0, 0xfe9,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F7", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm F8", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm GFX 50S", 0, 0,
{ 11756,-4754,-874,-3056,11045,2305,-381,1457,6006 } },
{ "Fujifilm S100FS", 514, 0,
{ 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } },
{ "Fujifilm S1", 0, 0,
{ 12297,-4882,-1202,-2106,10691,1623,-88,1312,4790 } },
{ "Fujifilm S20Pro", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm S20", 512, 0x3fff,
{ 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } },
{ "Fujifilm S2Pro", 128, 0xf15,
{ 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } },
{ "Fujifilm S3Pro", 0, 0x3dff,
{ 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } },
{ "Fujifilm S5Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm S5000", 0, 0,
{ 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } },
{ "Fujifilm S5100", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5500", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5200", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S5600", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S6", 0, 0,
{ 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } },
{ "Fujifilm S7000", 0, 0,
{ 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } },
{ "Fujifilm S9000", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9500", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9100", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm S9600", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm SL1000", 0, 0,
{ 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } },
{ "Fujifilm IS-1", 0, 0,
{ 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } },
{ "Fujifilm IS Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm HS10 HS11", 0, 0xf68,
{ 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } },
{ "Fujifilm HS2", 0, 0xfef,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS3", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS50EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm F900EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm X100F", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X100S", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100T", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100", 0, 0,
{ 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } },
{ "Fujifilm X10", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X20", 0, 0,
{ 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } },
{ "Fujifilm X30", 0, 0,
{ 12328,-5256,-1144,-4469,12927,1675,-87,1291,4351 } },
{ "Fujifilm X70", 0, 0,
{ 10450,-4329,-878,-3217,11105,2421,-752,1758,6519 } },
{ "Fujifilm X-Pro1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-Pro2", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X-A10", 0, 0,
{ 11540,-4999,-991,-2949,10963,2278,-382,1049,5605 } },
{ "Fujifilm X-A20", 0, 0,
{ 11540,-4999,-991,-2949,10963,2278,-382,1049,5605 } },
{ "Fujifilm X-A1", 0, 0,
{ 11086,-4555,-839,-3512,11310,2517,-815,1341,5940 } },
{ "Fujifilm X-A2", 0, 0,
{ 10763,-4560,-917,-3346,11311,2322,-475,1135,5843 } },
{ "Fujifilm X-A3", 0, 0,
{ 12407,-5222,-1086,-2971,11116,2120,-294,1029,5284 } },
{ "Fujifilm X-A5", 0, 0,
{ 11673,-4760,-1041,-3988,12058,2166,-771,1417,5569 } },
{ "Fujifilm X-E1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E2S", 0, 0,
{ 11562,-5118,-961,-3022,11007,2311,-525,1569,6097 } },
{ "Fujifilm X-E2", 0, 0,
{ 8458,-2451,-855,-4597,12447,2407,-1475,2482,6526 } },
{ "Fujifilm X-E3", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X-H1", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X-M1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-S1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-T1", 0, 0, /* also X-T10 */
{ 8458,-2451,-855,-4597,12447,2407,-1475,2482,6526 } },
{ "Fujifilm X-T2", 0, 0, /* also X-T20 */
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm XF1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm XQ", 0, 0, /* XQ1 and XQ2 */
{ 9252,-2704,-1064,-5893,14265,1717,-1101,2341,4349 } },
{ "GoPro HERO5 Black", 0, 0,
{ 10344,-4210,-620,-2315,10625,1948,93,1058,5541 } },
{ "Imacon Ixpress", 0, 0, /* DJC */
{ 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } },
{ "Kodak NC2000", 0, 0,
{ 13891,-6055,-803,-465,9919,642,2121,82,1291 } },
{ "Kodak DCS315C", 8, 0,
{ 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } },
{ "Kodak DCS330C", 8, 0,
{ 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } },
{ "Kodak DCS420", 0, 0,
{ 10868,-1852,-644,-1537,11083,484,2343,628,2216 } },
{ "Kodak DCS460", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS1", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS3B", 0, 0,
{ 9898,-2700,-940,-2478,12219,206,1985,634,1031 } },
{ "Kodak DCS520C", 178, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Kodak DCS560C", 177, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Kodak DCS620C", 177, 0,
{ 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } },
{ "Kodak DCS620X", 176, 0,
{ 13095,-6231,154,12221,-21,-2137,895,4602,2258 } },
{ "Kodak DCS660C", 173, 0,
{ 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } },
{ "Kodak DCS720X", 0, 0,
{ 11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } },
{ "Kodak DCS760C", 0, 0,
{ 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } },
{ "Kodak DCS Pro SLR", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14nx", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14", 0, 0,
{ 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } },
{ "Kodak ProBack645", 0, 0,
{ 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } },
{ "Kodak ProBack", 0, 0,
{ 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } },
{ "Kodak P712", 0, 0,
{ 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } },
{ "Kodak P850", 0, 0xf7c,
{ 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } },
{ "Kodak P880", 0, 0xfff,
{ 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } },
{ "Kodak EasyShare Z980", 0, 0,
{ 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } },
{ "Kodak EasyShare Z981", 0, 0,
{ 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } },
{ "Kodak EasyShare Z990", 0, 0xfed,
{ 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } },
{ "Kodak EASYSHARE Z1015", 0, 0xef1,
{ 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } },
{ "Leaf CMost", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Valeo 6", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Aptus 54S", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Leaf Aptus 65", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Aptus 75", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Mamiya ZD", 0, 0,
{ 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } },
{ "Micron 2010", 110, 0, /* DJC */
{ 16695,-3761,-2151,155,9682,163,3433,951,4904 } },
{ "Minolta DiMAGE 5", 0, 0xf7d,
{ 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } },
{ "Minolta DiMAGE 7Hi", 0, 0xf7d,
{ 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } },
{ "Minolta DiMAGE 7", 0, 0xf7d,
{ 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } },
{ "Minolta DiMAGE A1", 0, 0xf8b,
{ 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } },
{ "Minolta DiMAGE A200", 0, 0,
{ 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } },
{ "Minolta DiMAGE A2", 0, 0xf8f,
{ 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } },
{ "Minolta DiMAGE Z2", 0, 0, /* DJC */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Minolta DYNAX 5", 0, 0xffb,
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta DYNAX 7", 0, 0xffb,
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Motorola PIXL", 0, 0, /* DJC */
{ 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } },
{ "Nikon D100", 0, 0,
{ 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } },
{ "Nikon D1H", 0, 0,
{ 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } },
{ "Nikon D1X", 0, 0,
{ 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } },
{ "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */
{ 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } },
{ "Nikon D200", 0, 0xfbc,
{ 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } },
{ "Nikon D2H", 0, 0,
{ 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } },
{ "Nikon D2X", 0, 0,
{ 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } },
{ "Nikon D3000", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D3100", 0, 0,
{ 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } },
{ "Nikon D3200", 0, 0xfb9,
{ 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } },
{ "Nikon D3300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D3400", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D300", 0, 0,
{ 9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } },
{ "Nikon D3X", 0, 0,
{ 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } },
{ "Nikon D3S", 0, 0,
{ 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } },
{ "Nikon D3", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D40X", 0, 0,
{ 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } },
{ "Nikon D40", 0, 0,
{ 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } },
{ "Nikon D4S", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D4", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon Df", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D5000", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } },
{ "Nikon D5100", 0, 0x3de6,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D5200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D5300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D5500", 0, 0,
{ 8821,-2938,-785,-4178,12142,2287,-824,1651,6860 } },
{ "Nikon D5600", 0, 0,
{ 8821,-2938,-785,-4178,12142,2287,-824,1651,6860 } },
{ "Nikon D500", 0, 0,
{ 8813,-3210,-1036,-4703,12868,2021,-1054,1940,6129 } },
{ "Nikon D50", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D5", 0, 0,
{ 9200,-3522,-992,-5755,13803,2117,-753,1486,6338 } },
{ "Nikon D600", 0, 0x3e07,
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{ "Nikon D610", 0, 0,
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{ "Nikon D60", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D7000", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D7100", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D7200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D7500", 0, 0,
{ 8813,-3210,-1036,-4703,12868,2021,-1054,1940,6129 } },
{ "Nikon D750", 0, 0,
{ 9020,-2890,-715,-4535,12436,2348,-934,1919,7086 } },
{ "Nikon D700", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D70", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D850", 0, 0,
{ 10405,-3755,-1270,-5461,13787,1793,-1040,2015,6785 } },
{ "Nikon D810", 0, 0,
{ 9369,-3195,-791,-4488,12430,2301,-893,1796,6872 } },
{ "Nikon D800", 0, 0,
{ 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } },
{ "Nikon D80", 0, 0,
{ 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } },
{ "Nikon D90", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } },
{ "Nikon E700", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E800", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E950", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E995", 0, 0, /* copied from E5000 */
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E2100", 0, 0, /* copied from Z2, new white balance */
{ 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711} },
{ "Nikon E2500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E3200", 0, 0, /* DJC */
{ 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } },
{ "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Nikon E4500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5000", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5400", 0, 0,
{ 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } },
{ "Nikon E5700", 0, 0,
{ -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } },
{ "Nikon E8400", 0, 0,
{ 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } },
{ "Nikon E8700", 0, 0,
{ 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "Nikon E8800", 0, 0,
{ 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } },
{ "Nikon COOLPIX A", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon COOLPIX B700", 200, 0,
{ 14387,-6014,-1299,-1357,9975,1616,467,1047,4744 } },
{ "Nikon COOLPIX P330", 200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P340", 200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P6000", 0, 0,
{ 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } },
{ "Nikon COOLPIX P7000", 0, 0,
{ 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } },
{ "Nikon COOLPIX P7100", 0, 0,
{ 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } },
{ "Nikon COOLPIX P7700", 200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P7800", 200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon 1 V3", 0, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J4", 0, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J5", 0, 0,
{ 7520,-2518,-645,-3844,12102,1945,-913,2249,6835 } },
{ "Nikon 1 S2", 200, 0,
{ 6612,-1342,-618,-3338,11055,2623,-174,1792,5075 } },
{ "Nikon 1 V2", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 J3", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 AW1", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 ", 0, 0, /* J1, J2, S1, V1 */
{ 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } },
{ "Olympus AIR A01", 0, 0,
{ 8992,-3093,-639,-2563,10721,2122,-437,1270,5473 } },
{ "Olympus C5050", 0, 0,
{ 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } },
{ "Olympus C5060", 0, 0,
{ 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } },
{ "Olympus C7070", 0, 0,
{ 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } },
{ "Olympus C70", 0, 0,
{ 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } },
{ "Olympus C80", 0, 0,
{ 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } },
{ "Olympus E-10", 0, 0xffc,
{ 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } },
{ "Olympus E-1", 0, 0,
{ 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } },
{ "Olympus E-20", 0, 0xffc,
{ 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } },
{ "Olympus E-300", 0, 0,
{ 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } },
{ "Olympus E-330", 0, 0,
{ 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } },
{ "Olympus E-30", 0, 0xfbc,
{ 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } },
{ "Olympus E-3", 0, 0xf99,
{ 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } },
{ "Olympus E-400", 0, 0,
{ 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } },
{ "Olympus E-410", 0, 0xf6a,
{ 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } },
{ "Olympus E-420", 0, 0xfd7,
{ 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } },
{ "Olympus E-450", 0, 0xfd2,
{ 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } },
{ "Olympus E-500", 0, 0,
{ 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } },
{ "Olympus E-510", 0, 0xf6a,
{ 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } },
{ "Olympus E-520", 0, 0xfd2,
{ 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } },
{ "Olympus E-5", 0, 0xeec,
{ 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } },
{ "Olympus E-600", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-620", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-P1", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P2", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-P5", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL1s", 0, 0,
{ 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } },
{ "Olympus E-PL1", 0, 0,
{ 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } },
{ "Olympus E-PL2", 0, 0xcf3,
{ 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } },
{ "Olympus E-PL3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PL5", 0, 0xfcb,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL6", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL7", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PL8", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PL9", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PM1", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PM2", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M10", 0, 0, /* also E-M10 Mark II & III */
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M1Mark II", 0, 0,
{ 9383,-3170,-763,-2457,10702,2020,-384,1236,5552 } },
{ "Olympus E-M1", 0, 0,
{ 7687,-1984,-606,-4327,11928,2721,-1381,2339,6452 } },
{ "Olympus E-M5MarkII", 0, 0,
{ 9422,-3258,-711,-2655,10898,2015,-512,1354,5512 } },
{ "Olympus E-M5", 0, 0xfe1,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus PEN-F", 0, 0,
{ 9476,-3182,-765,-2613,10958,1893,-449,1315,5268 } },
{ "Olympus SH-2", 0, 0,
{ 10156,-3425,-1077,-2611,11177,1624,-385,1592,5080 } },
{ "Olympus SP350", 0, 0,
{ 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } },
{ "Olympus SP3", 0, 0,
{ 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } },
{ "Olympus SP500UZ", 0, 0xfff,
{ 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } },
{ "Olympus SP510UZ", 0, 0xffe,
{ 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } },
{ "Olympus SP550UZ", 0, 0xffe,
{ 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } },
{ "Olympus SP560UZ", 0, 0xff9,
{ 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } },
{ "Olympus SP570UZ", 0, 0,
{ 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } },
{ "Olympus STYLUS1", 0, 0,
{ 8360,-2420,-880,-3928,12353,1739,-1381,2416,5173 } },
{ "Olympus TG-4", 0, 0,
{ 11426,-4159,-1126,-2066,10678,1593,-120,1327,4998 } },
{ "Olympus TG-5", 0, 0,
{ 10899,-3833,-1082,-2112,10736,1575,-267,1452,5269 } },
{ "Olympus XZ-10", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "Olympus XZ-1", 0, 0,
{ 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } },
{ "Olympus XZ-2", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "OmniVision", 0, 0, /* DJC */
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } },
{ "Pentax *ist DL2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DL", 0, 0,
{ 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } },
{ "Pentax *ist DS2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DS", 0, 0,
{ 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } },
{ "Pentax *ist D", 0, 0,
{ 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } },
{ "Pentax K10D", 0, 0,
{ 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } },
{ "Pentax K1", 0, 0,
{ 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } },
{ "Pentax K20D", 0, 0,
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Pentax K200D", 0, 0,
{ 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } },
{ "Pentax K2000", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-m", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-x", 0, 0,
{ 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } },
{ "Pentax K-r", 0, 0,
{ 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } },
{ "Pentax K-1", 0, 0,
{ 8596,-2981,-639,-4202,12046,2431,-685,1424,6122 } },
{ "Pentax K-30", 0, 0,
{ 8710,-2632,-1167,-3995,12301,1881,-981,1719,6535 } },
{ "Pentax K-3 II", 0, 0,
{ 8626,-2607,-1155,-3995,12301,1881,-1039,1822,6925 } },
{ "Pentax K-3", 0, 0,
{ 7415,-2052,-721,-5186,12788,2682,-1446,2157,6773 } },
{ "Pentax K-5 II", 0, 0,
{ 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } },
{ "Pentax K-5", 0, 0,
{ 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } },
{ "Pentax K-70", 0, 0,
{ 8270,-2117,-1299,-4359,12953,1515,-1078,1933,5975 } },
{ "Pentax K-7", 0, 0,
{ 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } },
{ "Pentax K-S1", 0, 0,
{ 8512,-3211,-787,-4167,11966,2487,-638,1288,6054 } },
{ "Pentax K-S2", 0, 0,
{ 8662,-3280,-798,-3928,11771,2444,-586,1232,6054 } },
{ "Pentax KP", 0, 0,
{ 8617,-3228,-1034,-4674,12821,2044,-803,1577,5728 } },
{ "Pentax Q-S1", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax 645D", 0, 0x3e00,
{ 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } },
{ "Panasonic DMC-CM1", 15, 0,
{ 8770,-3194,-820,-2871,11281,1803,-513,1552,4434 } },
{ "Panasonic DC-FZ80", 0, 0,
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DMC-FZ8", 0, 0xf7f,
{ 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } },
{ "Panasonic DMC-FZ18", 0, 0,
{ 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } },
{ "Panasonic DMC-FZ28", 15, 0xf96,
{ 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } },
{ "Panasonic DMC-FZ2500", 15, 0,
{ 7386,-2443,-743,-3437,11864,1757,-608,1660,4766 } },
{ "Panasonic DMC-FZ330", 15, 0,
{ 8378,-2798,-769,-3068,11410,1877,-538,1792,4623 } },
{ "Panasonic DMC-FZ300", 15, 0,
{ 8378,-2798,-769,-3068,11410,1877,-538,1792,4623 } },
{ "Panasonic DMC-FZ30", 0, 0xf94,
{ 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } },
{ "Panasonic DMC-FZ3", 15, 0, /* FZ35, FZ38 */
{ 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } },
{ "Panasonic DMC-FZ4", 15, 0, /* FZ40, FZ45 */
{ 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } },
{ "Panasonic DMC-FZ50", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-FZ7", 15, 0, /* FZ70, FZ72 */
{ 11532,-4324,-1066,-2375,10847,1749,-564,1699,4351 } },
{ "Leica V-LUX1", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-L10", 15, 0xf96,
{ 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } },
{ "Panasonic DMC-L1", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Leica DIGILUX 3", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Panasonic DMC-LC1", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Leica DIGILUX 2", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Panasonic DMC-LX100", 15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Leica D-LUX (Typ 109)", 15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Panasonic DMC-LF1", 15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Leica C (Typ 112)", 15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Panasonic DMC-LX1", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Leica D-LUX2", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Panasonic DMC-LX2", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Leica D-LUX3", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Panasonic DMC-LX3", 15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Leica D-LUX 4", 15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Panasonic DMC-LX5", 15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Leica D-LUX 5", 15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Panasonic DMC-LX7", 15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Leica D-LUX 6", 15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Panasonic DMC-LX9", 15, 0,
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-FZ1000", 15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Leica V-LUX (Typ 114)", 15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Panasonic DMC-FZ100", 15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Leica V-LUX 2", 15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Panasonic DMC-FZ150", 15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Leica V-LUX 3", 15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Panasonic DMC-FZ200", 15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Leica V-LUX 4", 15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Panasonic DMC-FX150", 15, 0xfff,
{ 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } },
{ "Panasonic DMC-G10", 0, 0,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G1", 15, 0xf94,
{ 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } },
{ "Panasonic DMC-G2", 15, 0xf3c,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G3", 15, 0xfff,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-G5", 15, 0xfff,
{ 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } },
{ "Panasonic DMC-G6", 15, 0xfff,
{ 8294,-2891,-651,-3869,11590,2595,-1183,2267,5352 } },
{ "Panasonic DMC-G7", 15, 0xfff,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-G8", 15, 0xfff, /* G8, G80, G81, G85 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DC-G9", 15, 0xfff,
{ 7685,-2375,-634,-3687,11700,2249,-748,1546,5111 } },
{ "Panasonic DMC-GF1", 15, 0xf92,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF2", 15, 0xfff,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF3", 15, 0xfff,
{ 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } },
{ "Panasonic DMC-GF5", 15, 0xfff,
{ 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } },
{ "Panasonic DMC-GF6", 15, 0,
{ 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } },
{ "Panasonic DMC-GF7", 15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GF8", 15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DC-GF9", 15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GH1", 15, 0xf92,
{ 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } },
{ "Panasonic DMC-GH2", 15, 0xf95,
{ 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } },
{ "Panasonic DMC-GH3", 15, 0,
{ 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } },
{ "Panasonic DMC-GH4", 15, 0,
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{ "Panasonic DC-GH5S", 15, 0,
{ 6929,-2355,-708,-4192,12534,1828,-1097,1989,5195 } },
{ "Panasonic DC-GH5", 15, 0,
{ 7641,-2336,-605,-3218,11299,2187,-485,1338,5121 } },
{ "Panasonic DMC-GM1", 15, 0,
{ 6770,-1895,-744,-5232,13145,2303,-1664,2691,5703 } },
{ "Panasonic DMC-GM5", 15, 0,
{ 8238,-3244,-679,-3921,11814,2384,-836,2022,5852 } },
{ "Panasonic DMC-GX1", 15, 0,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-GX7", 15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GX85", 15, 0,
{ 7771,-3020,-629,-4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX8", 15, 0,
{ 7564,-2263,-606,-3148,11239,2177,-540,1435,4853 } },
{ "Panasonic DC-GX9", 15, 0,
{ 7564,-2263,-606,-3148,11239,2177,-540,1435,4853 } },
{ "Panasonic DMC-ZS100", 15, 0,
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DC-ZS200", 15, 0,
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-ZS40", 15, 0,
{ 8607,-2822,-808,-3755,11930,2049,-820,2060,5224 } },
{ "Panasonic DMC-ZS50", 15, 0,
{ 8802,-3135,-789,-3151,11468,1904,-550,1745,4810 } },
{ "Panasonic DMC-TZ82", 15, 0,
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DMC-ZS6", 15, 0,
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DMC-ZS70", 15, 0,
{ 9052,-3117,-883,-3045,11346,1927,-205,1520,4730 } },
{ "Leica S (Typ 007)", 0, 0,
{ 6063,-2234,-231,-5210,13787,1500,-1043,2866,6997 } },
{ "Leica X", 0, 0, /* X and X-U, both (Typ 113) */
{ 7712,-2059,-653,-3882,11494,2726,-710,1332,5958 } },
{ "Leica Q (Typ 116)", 0, 0,
{ 11865,-4523,-1441,-5423,14458,935,-1587,2687,4830 } },
{ "Leica M (Typ 262)", 0, 0,
{ 6653,-1486,-611,-4221,13303,929,-881,2416,7226 } },
{ "Leica SL (Typ 601)", 0, 0,
{ 11865,-4523,-1441,-5423,14458,935,-1587,2687,4830 } },
{ "Leica TL2", 0, 0,
{ 5836,-1626,-647,-5384,13326,2261,-1207,2129,5861 } },
{ "Leica TL", 0, 0,
{ 5463,-988,-364,-4634,12036,2946,-766,1389,6522 } },
{ "Leica CL", 0, 0,
{ 7414,-2393,-840,-5127,13180,2138,-1585,2468,5064 } },
{ "Leica M10", 0, 0,
{ 8249,-2849,-620,-5415,14756,565,-957,3074,6517 } },
{ "Phase One H 20", 0, 0, /* DJC */
{ 1313,1855,-109,-6715,15908,808,-327,1840,6020 } },
{ "Phase One H 25", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 2", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 30", 0, 0,
{ 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } },
{ "Phase One P 45", 0, 0,
{ 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } },
{ "Phase One P40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P65", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Photron BC2-HD", 0, 0, /* DJC */
{ 14603,-4122,-528,-1810,9794,2017,-297,2763,5936 } },
{ "Red One", 704, 0xffff, /* DJC */
{ 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } },
{ "Ricoh GR II", 0, 0,
{ 4630,-834,-423,-4977,12805,2417,-638,1467,6115 } },
{ "Ricoh GR", 0, 0,
{ 3708,-543,-160,-5381,12254,3556,-1471,1929,8234 } },
{ "Samsung EX1", 0, 0x3e00,
{ 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } },
{ "Samsung EX2F", 0, 0x7ff,
{ 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } },
{ "Samsung EK-GN120", 0, 0,
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX mini", 0, 0,
{ 5222,-1196,-550,-6540,14649,2009,-1666,2819,5657 } },
{ "Samsung NX3300", 0, 0,
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX3000", 0, 0,
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX30", 0, 0, /* NX30, NX300, NX300M */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2000", 0, 0,
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2", 0, 0xfff, /* NX20, NX200, NX210 */
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1000", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1100", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX11", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX10", 0, 0, /* also NX100 */
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX500", 0, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung NX5", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX1", 0, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung WB2000", 0, 0xfff,
{ 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } },
{ "Samsung GX-1", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Samsung GX20", 0, 0, /* copied from Pentax K20D */
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Samsung S85", 0, 0, /* DJC */
{ 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } },
{ "Sinar", 0, 0, /* DJC */
{ 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } },
{ "Sony DSC-F828", 0, 0,
{ 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } },
{ "Sony DSC-R1", 0, 0,
{ 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } },
{ "Sony DSC-V3", 0, 0,
{ 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } },
{ "Sony DSC-RX100M", 0, 0, /* M2, M3, M4, and M5 */
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Sony DSC-RX100", 0, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Sony DSC-RX10M4", 0, 0,
{ 7699,-2566,-629,-2967,11270,1928,-378,1286,4807 } },
{ "Sony DSC-RX10", 0, 0, /* also RX10M2, RX10M3 */
{ 6679,-1825,-745,-5047,13256,1953,-1580,2422,5183 } },
{ "Sony DSC-RX1RM2", 0, 0,
{ 6629,-1900,-483,-4618,12349,2550,-622,1381,6514 } },
{ "Sony DSC-RX1", 0, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "Sony DSC-RX0", 200, 0,
{ 9396,-3507,-843,-2497,11111,1572,-343,1355,5089 } },
{ "Sony DSLR-A100", 0, 0xfeb,
{ 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } },
{ "Sony DSLR-A290", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A2", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A300", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A330", 0, 0,
{ 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } },
{ "Sony DSLR-A350", 0, 0xffc,
{ 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } },
{ "Sony DSLR-A380", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A390", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A450", 0, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A580", 0, 0xfeb,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony DSLR-A500", 0, 0xfeb,
{ 6046,-1127,-278,-5574,13076,2786,-691,1419,7625 } },
{ "Sony DSLR-A5", 0, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A700", 0, 0,
{ 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } },
{ "Sony DSLR-A850", 0, 0,
{ 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } },
{ "Sony DSLR-A900", 0, 0,
{ 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } },
{ "Sony ILCA-68", 0, 0,
{ 6435,-1903,-536,-4722,12449,2550,-663,1363,6517 } },
{ "Sony ILCA-77M2", 0, 0,
{ 5991,-1732,-443,-4100,11989,2381,-704,1467,5992 } },
{ "Sony ILCA-99M2", 0, 0,
{ 6660,-1918,-471,-4613,12398,2485,-649,1433,6447 } },
{ "Sony ILCE-6", 0, 0, /* 6300, 6500 */
{ 5973,-1695,-419,-3826,11797,2293,-639,1398,5789 } },
{ "Sony ILCE-7M2", 0, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-7M3", 0, 0,
{ 7374,-2389,-551,-5435,13162,2519,-1006,1795,6552 } },
{ "Sony ILCE-7S", 0, 0, /* also ILCE-7SM2 */
{ 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } },
{ "Sony ILCE-7RM3", 0, 0,
{ 6640,-1847,-503,-5238,13010,2474,-993,1673,6527 } },
{ "Sony ILCE-7RM2", 0, 0,
{ 6629,-1900,-483,-4618,12349,2550,-622,1381,6514 } },
{ "Sony ILCE-7R", 0, 0,
{ 4913,-541,-202,-6130,13513,2906,-1564,2151,7183 } },
{ "Sony ILCE-7", 0, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-9", 0, 0,
{ 6389,-1703,-378,-4562,12265,2587,-670,1489,6550 } },
{ "Sony ILCE", 0, 0, /* 3000, 5000, 5100, 6000, and QX1 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5N", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5R", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-5T", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3N", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3", 138, 0, /* DJC */
{ 6907,-1256,-645,-4940,12621,2320,-1710,2581,6230 } },
{ "Sony NEX-5", 116, 0, /* DJC */
{ 6807,-1350,-342,-4216,11649,2567,-1089,2001,6420 } },
{ "Sony NEX-3", 0, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-5", 0, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-6", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-7", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony NEX", 0, 0, /* NEX-C3, NEX-F3 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A33", 0, 0,
{ 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } },
{ "Sony SLT-A35", 0, 0,
{ 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } },
{ "Sony SLT-A37", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A55", 0, 0,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony SLT-A57", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A58", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A65", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A77", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A99", 0, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "YI M1", 0, 0,
{ 7712,-2059,-653,-3882,11494,2726,-710,1332,5958 } },
};
double cam_xyz[4][3];
char name[130];
int i, j;
sprintf (name, "%s %s", make, model);
for (i=0; i < sizeof table / sizeof *table; i++)
if (!strncmp (name, table[i].prefix, strlen(table[i].prefix))) {
if (table[i].black) black = (ushort) table[i].black;
if (table[i].maximum) maximum = (ushort) table[i].maximum;
if (table[i].trans[0]) {
for (raw_color = j=0; j < 12; j++)
((double *)cam_xyz)[j] = table[i].trans[j] / 10000.0;
cam_xyz_coeff (rgb_cam, cam_xyz);
}
break;
}
}
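/*
   Load one of the hard-wired camera-to-RGB matrices below into rgb_cam
   and mark the color as calibrated (raw_color = 0).
 */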
void CLASS simple_coeff (int index)
{
static const float table[][12] = {
/* index 0 -- all Foveon cameras */
{ 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 },
/* index 1 -- Kodak DC20 and DC25 */
{ 2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 },
/* index 2 -- Logitech Fotoman Pixtura */
{ 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 },
/* index 3 -- Nikon E880, E900, and E990 */
{ -1.936280, 1.800443, -1.448486, 2.584324,
1.405365, -0.524955, -0.289090, 0.408680,
-1.204965, 1.082304, 2.941367, -1.818705 }
};
int i, c;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[index][i*colors+c];
}
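/*
   Read "words" 16-bit samples and decide whether they look smoother as
   big-endian or little-endian, returning 0x4d4d ("MM") or 0x4949 ("II").
 */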
short CLASS guess_byte_order (int words)
{
uchar test[4][2];
int t=2, msb;
double diff, sum[2] = {0,0};
fread (test[0], 2, 2, ifp);
for (words-=2; words--; ) {
fread (test[t], 2, 1, ifp);
for (msb=0; msb < 2; msb++) {
diff = (test[t^2][msb] << 8 | test[t^2][!msb])
- (test[t ][msb] << 8 | test[t ][!msb]);
sum[msb] += diff*diff;
}
t = (t+1) & 3;
}
return sum[0] < sum[1] ? 0x4d4d : 0x4949;
}
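/*
   Unpack one row of "bps"-bit samples from each of two file offsets and
   compare them shifted by one pixel in each direction; identify() uses
   the log-ratio of the two difference sums to guess the CFA layout.
 */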
float CLASS find_green (int bps, int bite, int off0, int off1)
{
UINT64 bitbuf=0;
int vbits, col, i, c;
ushort img[2][2064];
double sum[]={0,0};
FORC(2) {
fseek (ifp, c ? off1:off0, SEEK_SET);
for (vbits=col=0; col < width; col++) {
for (vbits -= bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps);
}
}
FORC(width-1) {
sum[ c & 1] += ABS(img[0][c]-img[1][c+1]);
sum[~c & 1] += ABS(img[1][c]-img[0][c+1]);
}
return 100 * log(sum[0]/sum[1]);
}
/*
Identify which camera created this file, and set global variables
accordingly.
*/
void CLASS identify()
{
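/* Crop table for Panasonic/Leica raw sizes:
   { raw_width, raw_height, left_margin, top_margin, width_adj, height_adj } */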
static const short pana[][6] = {
{ 3130, 1743, 4, 0, -6, 0 },
{ 3130, 2055, 4, 0, -6, 0 },
{ 3130, 2319, 4, 0, -6, 0 },
{ 3170, 2103, 18, 0,-42, 20 },
{ 3170, 2367, 18, 13,-42,-21 },
{ 3177, 2367, 0, 0, -1, 0 },
{ 3304, 2458, 0, 0, -1, 0 },
{ 3330, 2463, 9, 0, -5, 0 },
{ 3330, 2479, 9, 0,-17, 4 },
{ 3370, 1899, 15, 0,-44, 20 },
{ 3370, 2235, 15, 0,-44, 20 },
{ 3370, 2511, 15, 10,-44,-21 },
{ 3690, 2751, 3, 0, -8, -3 },
{ 3710, 2751, 0, 0, -3, 0 },
{ 3724, 2450, 0, 0, 0, -2 },
{ 3770, 2487, 17, 0,-44, 19 },
{ 3770, 2799, 17, 15,-44,-19 },
{ 3880, 2170, 6, 0, -6, 0 },
{ 4060, 3018, 0, 0, 0, -2 },
{ 4290, 2391, 3, 0, -8, -1 },
{ 4330, 2439, 17, 15,-44,-19 },
{ 4508, 2962, 0, 0, -3, -4 },
{ 4508, 3330, 0, 0, -3, -6 },
};
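/* Crop table for Canon raw sizes:
   { raw_width, raw_height, left_margin, top_margin, width_trim, height_trim,
     optional black-mask coordinates and filter pattern byte } */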
static const ushort canon[][11] = {
{ 1944, 1416, 0, 0, 48, 0 },
{ 2144, 1560, 4, 8, 52, 2, 0, 0, 0, 25 },
{ 2224, 1456, 48, 6, 0, 2 },
{ 2376, 1728, 12, 6, 52, 2 },
{ 2672, 1968, 12, 6, 44, 2 },
{ 3152, 2068, 64, 12, 0, 0, 16 },
{ 3160, 2344, 44, 12, 4, 4 },
{ 3344, 2484, 4, 6, 52, 6 },
{ 3516, 2328, 42, 14, 0, 0 },
{ 3596, 2360, 74, 12, 0, 0 },
{ 3744, 2784, 52, 12, 8, 12 },
{ 3944, 2622, 30, 18, 6, 2 },
{ 3948, 2622, 42, 18, 0, 2 },
{ 3984, 2622, 76, 20, 0, 2, 14 },
{ 4104, 3048, 48, 12, 24, 12 },
{ 4116, 2178, 4, 2, 0, 0 },
{ 4152, 2772, 192, 12, 0, 0 },
{ 4160, 3124, 104, 11, 8, 65 },
{ 4176, 3062, 96, 17, 8, 0, 0, 16, 0, 7, 0x49 },
{ 4192, 3062, 96, 17, 24, 0, 0, 16, 0, 0, 0x49 },
{ 4312, 2876, 22, 18, 0, 2 },
{ 4352, 2874, 62, 18, 0, 0 },
{ 4476, 2954, 90, 34, 0, 0 },
{ 4480, 3348, 12, 10, 36, 12, 0, 0, 0, 18, 0x49 },
{ 4480, 3366, 80, 50, 0, 0 },
{ 4496, 3366, 80, 50, 12, 0 },
{ 4768, 3516, 96, 16, 0, 0, 0, 16 },
{ 4832, 3204, 62, 26, 0, 0 },
{ 4832, 3228, 62, 51, 0, 0 },
{ 5108, 3349, 98, 13, 0, 0 },
{ 5120, 3318, 142, 45, 62, 0 },
{ 5280, 3528, 72, 52, 0, 0 },
{ 5344, 3516, 142, 51, 0, 0 },
{ 5344, 3584, 126,100, 0, 2 },
{ 5360, 3516, 158, 51, 0, 0 },
{ 5568, 3708, 72, 38, 0, 0 },
{ 5632, 3710, 96, 17, 0, 0, 0, 16, 0, 0, 0x49 },
{ 5712, 3774, 62, 20, 10, 2 },
{ 5792, 3804, 158, 51, 0, 0 },
{ 5920, 3950, 122, 80, 2, 0 },
{ 6096, 4051, 76, 35, 0, 0 },
{ 6096, 4056, 72, 34, 0, 0 },
{ 6288, 4056, 264, 36, 0, 0 },
{ 6384, 4224, 120, 44, 0, 0 },
{ 6880, 4544, 136, 42, 0, 0 },
{ 8896, 5920, 160, 64, 0, 0 },
};
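/* Model-ID lookup tables: "unique" maps Canon EOS IDs (matched below as
   0x80000000 + id) to model names, "sonique" does the same for Sony bodies. */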
static const struct {
ushort id;
char model[20];
} unique[] = {
{ 0x168, "EOS 10D" }, { 0x001, "EOS-1D" },
{ 0x175, "EOS 20D" }, { 0x174, "EOS-1D Mark II" },
{ 0x234, "EOS 30D" }, { 0x232, "EOS-1D Mark II N" },
{ 0x190, "EOS 40D" }, { 0x169, "EOS-1D Mark III" },
{ 0x261, "EOS 50D" }, { 0x281, "EOS-1D Mark IV" },
{ 0x287, "EOS 60D" }, { 0x167, "EOS-1DS" },
{ 0x325, "EOS 70D" },
{ 0x408, "EOS 77D" }, { 0x331, "EOS M" },
{ 0x350, "EOS 80D" }, { 0x328, "EOS-1D X Mark II" },
{ 0x346, "EOS 100D" },
{ 0x417, "EOS 200D" },
{ 0x170, "EOS 300D" }, { 0x188, "EOS-1Ds Mark II" },
{ 0x176, "EOS 450D" }, { 0x215, "EOS-1Ds Mark III" },
{ 0x189, "EOS 350D" }, { 0x324, "EOS-1D C" },
{ 0x236, "EOS 400D" }, { 0x269, "EOS-1D X" },
{ 0x252, "EOS 500D" }, { 0x213, "EOS 5D" },
{ 0x270, "EOS 550D" }, { 0x218, "EOS 5D Mark II" },
{ 0x286, "EOS 600D" }, { 0x285, "EOS 5D Mark III" },
{ 0x301, "EOS 650D" }, { 0x302, "EOS 6D" },
{ 0x326, "EOS 700D" }, { 0x250, "EOS 7D" },
{ 0x393, "EOS 750D" }, { 0x289, "EOS 7D Mark II" },
{ 0x347, "EOS 760D" }, { 0x406, "EOS 6D Mark II" },
{ 0x405, "EOS 800D" }, { 0x349, "EOS 5D Mark IV" },
{ 0x254, "EOS 1000D" },
{ 0x288, "EOS 1100D" },
{ 0x327, "EOS 1200D" }, { 0x382, "EOS 5DS" },
{ 0x404, "EOS 1300D" }, { 0x401, "EOS 5DS R" },
{ 0x422, "EOS 1500D" },
{ 0x432, "EOS 3000D" },
}, sonique[] = {
{ 0x002, "DSC-R1" }, { 0x100, "DSLR-A100" },
{ 0x101, "DSLR-A900" }, { 0x102, "DSLR-A700" },
{ 0x103, "DSLR-A200" }, { 0x104, "DSLR-A350" },
{ 0x105, "DSLR-A300" }, { 0x108, "DSLR-A330" },
{ 0x109, "DSLR-A230" }, { 0x10a, "DSLR-A290" },
{ 0x10d, "DSLR-A850" }, { 0x111, "DSLR-A550" },
{ 0x112, "DSLR-A500" }, { 0x113, "DSLR-A450" },
{ 0x116, "NEX-5" }, { 0x117, "NEX-3" },
{ 0x118, "SLT-A33" }, { 0x119, "SLT-A55V" },
{ 0x11a, "DSLR-A560" }, { 0x11b, "DSLR-A580" },
{ 0x11c, "NEX-C3" }, { 0x11d, "SLT-A35" },
{ 0x11e, "SLT-A65V" }, { 0x11f, "SLT-A77V" },
{ 0x120, "NEX-5N" }, { 0x121, "NEX-7" },
{ 0x123, "SLT-A37" }, { 0x124, "SLT-A57" },
{ 0x125, "NEX-F3" }, { 0x126, "SLT-A99V" },
{ 0x127, "NEX-6" }, { 0x128, "NEX-5R" },
{ 0x129, "DSC-RX100" }, { 0x12a, "DSC-RX1" },
{ 0x12e, "ILCE-3000" }, { 0x12f, "SLT-A58" },
{ 0x131, "NEX-3N" }, { 0x132, "ILCE-7" },
{ 0x133, "NEX-5T" }, { 0x134, "DSC-RX100M2" },
{ 0x135, "DSC-RX10" }, { 0x136, "DSC-RX1R" },
{ 0x137, "ILCE-7R" }, { 0x138, "ILCE-6000" },
{ 0x139, "ILCE-5000" }, { 0x13d, "DSC-RX100M3" },
{ 0x13e, "ILCE-7S" }, { 0x13f, "ILCA-77M2" },
{ 0x153, "ILCE-5100" }, { 0x154, "ILCE-7M2" },
{ 0x155, "DSC-RX100M4" },{ 0x156, "DSC-RX10M2" },
{ 0x158, "DSC-RX1RM2" }, { 0x15a, "ILCE-QX1" },
{ 0x15b, "ILCE-7RM2" }, { 0x15e, "ILCE-7SM2" },
{ 0x161, "ILCA-68" }, { 0x162, "ILCA-99M2" },
{ 0x163, "DSC-RX10M3" }, { 0x164, "DSC-RX100M5" },
{ 0x165, "ILCE-6300" }, { 0x166, "ILCE-9" },
{ 0x168, "ILCE-6500" }, { 0x16a, "ILCE-7RM3" },
{ 0x16b, "ILCE-7M3" }, { 0x16c, "DSC-RX0" },
{ 0x16d, "DSC-RX10M4" },
};
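/* Panasonic model aliases: an entry starting with '@' is the canonical
   name, and the entries that follow are regional names mapped onto it. */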
static const char *orig, panalias[][12] = {
"@DC-FZ80", "DC-FZ82", "DC-FZ85",
"@DC-FZ81", "DC-FZ83",
"@DC-GF9", "DC-GX800", "DC-GX850",
"@DC-GF10", "DC-GF90",
"@DC-GX9", "DC-GX7MK3",
"@DC-ZS70", "DC-TZ90", "DC-TZ91", "DC-TZ92", "DC-TZ93",
"@DMC-FZ40", "DMC-FZ45",
"@DMC-FZ2500", "DMC-FZ2000", "DMC-FZH1",
"@DMC-G8", "DMC-G80", "DMC-G81", "DMC-G85",
"@DMC-GX85", "DMC-GX80", "DMC-GX7MK2",
"@DMC-LX9", "DMC-LX10", "DMC-LX15",
"@DMC-ZS40", "DMC-TZ60", "DMC-TZ61",
"@DMC-ZS50", "DMC-TZ70", "DMC-TZ71",
"@DMC-ZS60", "DMC-TZ80", "DMC-TZ81", "DMC-TZ85",
"@DMC-ZS100", "DMC-ZS110", "DMC-TZ100", "DMC-TZ101", "DMC-TZ110", "DMC-TX1",
"@DC-ZS200", "DC-TX2", "DC-TZ200", "DC-TZ202", "DC-TZ220", "DC-ZS220",
};
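/* Cameras identified purely by raw file size: when no maker is found in
   the metadata, identify() matches fsize against this table and takes the
   geometry, margins, filter byte, load flags and data offset from here. */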
static const struct {
unsigned fsize;
ushort rw, rh;
uchar lm, tm, rm, bm, lf, cf, max, flags;
char make[10], model[20];
ushort offset;
} table[] = {
{ 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" },
{ 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" },
{ 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" },
{ 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" },
{ 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 },
{ 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" },
{ 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 },
{ 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" },
{ 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" },
{ 9631728,2532,1902, 0, 0, 0, 0,96,0x61,0,0,"Alcatel","5035D" },
{ 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 },
{ 5298000,2400,1766,12,12,44, 2, 8,0x94,0,2,"Canon","PowerShot SD300" },
{ 6553440,2664,1968, 4, 4,44, 4, 8,0x94,0,2,"Canon","PowerShot A460" },
{ 6573120,2672,1968,12, 8,44, 0, 8,0x94,0,2,"Canon","PowerShot A610" },
{ 6653280,2672,1992,10, 6,42, 2, 8,0x94,0,2,"Canon","PowerShot A530" },
{ 7710960,2888,2136,44, 8, 4, 0, 8,0x94,0,2,"Canon","PowerShot S3 IS" },
{ 9219600,3152,2340,36,12, 4, 0, 8,0x94,0,2,"Canon","PowerShot A620" },
{ 9243240,3152,2346,12, 7,44,13, 8,0x49,0,2,"Canon","PowerShot A470" },
{ 10341600,3336,2480, 6, 5,32, 3, 8,0x94,0,2,"Canon","PowerShot A720 IS" },
{ 10383120,3344,2484,12, 6,44, 6, 8,0x94,0,2,"Canon","PowerShot A630" },
{ 12945240,3736,2772,12, 6,52, 6, 8,0x94,0,2,"Canon","PowerShot A640" },
{ 15636240,4104,3048,48,12,24,12, 8,0x94,0,2,"Canon","PowerShot A650" },
{ 15467760,3720,2772, 6,12,30, 0, 8,0x94,0,2,"Canon","PowerShot SX110 IS" },
{ 15534576,3728,2778,12, 9,44, 9, 8,0x94,0,2,"Canon","PowerShot SX120 IS" },
{ 18653760,4080,3048,24,12,24,12, 8,0x94,0,2,"Canon","PowerShot SX20 IS" },
{ 19131120,4168,3060,92,16, 4, 1, 8,0x94,0,2,"Canon","PowerShot SX220 HS" },
{ 21936096,4464,3276,25,10,73,12, 8,0x16,0,2,"Canon","PowerShot SX30 IS" },
{ 24724224,4704,3504, 8,16,56, 8, 8,0x94,0,2,"Canon","PowerShot A3300 IS" },
{ 30858240,5248,3920, 8,16,56,16, 8,0x94,0,2,"Canon","IXUS 160" },
{ 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" },
{ 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" },
{ 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" },
{ 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" },
{ 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" },
{ 4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" },
{ 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" },
{ 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" },
{ 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" },
{ 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" },
{ 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" },
{ 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" },
{ 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" },
{ 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" },
{ 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" },
{ 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" },
{ 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" },
{ 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" },
{ 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" },
{ 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" },
{ 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" },
{ 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" },
{ 28829184,4384,3288, 0, 0, 0, 0,36,0x61,0,0,"DJI" },
{ 15151104,4608,3288, 0, 0, 0, 0, 0,0x94,0,0,"Matrix" },
{ 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" },
{ 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic" },
{ 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" },
{ 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" },
{ 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 },
{ 2247168,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" },
{ 3370752,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" },
{ 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" },
{ 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 },
{ 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 9116448,2848,2134, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 12241200,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP" },
{ 12272756,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP",31556 },
{ 18000000,4000,3000, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","12MP" },
{ 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" },
{ 15360000,3200,2400, 0, 0, 0, 0,96,0x16,0,0,"Lenovo","A820" },
{ 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 },
{ 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 },
{ 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" },
{ 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" },
{ 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" },
{ 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" },
{ 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" },
{ 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" },
{ 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" },
{ 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" },
{ 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" },
{ 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" },
{ 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" },
{ 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" },
{ 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" },
{ 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" },
{ 4147200,1920,1080, 0, 0, 0, 0, 0,0x49,0,0,"Photron","BC2-HD" },
{ 4151666,1920,1080, 0, 0, 0, 0, 0,0x49,0,0,"Photron","BC2-HD",8 },
{ 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" },
{ 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" },
{ 311696, 644, 484, 0, 0, 0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" },
{ 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" },
{ 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" },
{ 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" },
{ 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" },
{ 17496000,4320,3240, 0, 0, 0,0,224,0x94,0,0,"Xiro","Xplorer V" },
};
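/* Company names recognized inside the "make" string and rewritten to a
   canonical spelling further below. */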
static const char *corp[] =
{ "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm",
"Mamiya", "Minolta", "Motorola", "Kodak", "Konica", "Leica",
"Nikon", "Nokia", "Olympus", "Ricoh", "Pentax", "Phase One",
"Samsung", "Sigma", "Sinar", "Sony", "YI" };
char head[32], *cp;
int hlen, flen, fsize, zero_fsize=1, i, c;
struct jhead jh;
tiff_flip = flip = filters = UINT_MAX; /* unknown */
raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0;
maximum = height = width = top_margin = left_margin = 0;
cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0;
iso_speed = shutter = aperture = focal_len = unique_id = 0;
tiff_nifds = 0;
memset (tiff_ifd, 0, sizeof tiff_ifd);
memset (gpsdata, 0, sizeof gpsdata);
memset (cblack, 0, sizeof cblack);
memset (white, 0, sizeof white);
memset (mask, 0, sizeof mask);
thumb_offset = thumb_length = thumb_width = thumb_height = 0;
load_raw = thumb_load_raw = 0;
write_thumb = &CLASS jpeg_thumb;
data_offset = meta_offset = meta_length = tiff_bps = tiff_compress = 0;
kodak_cbpp = zero_after_ff = dng_version = load_flags = 0;
timestamp = shot_order = tiff_samples = black = is_foveon = 0;
mix_green = profile_length = data_error = zero_is_bad = 0;
pixel_aspect = is_raw = raw_color = 1;
tile_width = tile_length = 0;
for (i=0; i < 4; i++) {
cam_mul[i] = i == 1;
pre_mul[i] = i < 3;
FORC3 cmatrix[c][i] = 0;
FORC3 rgb_cam[c][i] = c == i;
}
colors = 3;
for (i=0; i < 0x10000; i++) curve[i] = i;
order = get2();
hlen = get4();
fseek (ifp, 0, SEEK_SET);
fread (head, 1, 32, ifp);
fseek (ifp, 0, SEEK_END);
flen = fsize = ftell(ifp);
if ((cp = (char *) memmem (head, 32, "MMMM", 4)) ||
(cp = (char *) memmem (head, 32, "IIII", 4))) {
parse_phase_one (cp-head);
if (cp-head && parse_tiff(0)) apply_tiff();
} else if (order == 0x4949 || order == 0x4d4d) {
if (!memcmp (head+6,"HEAPCCDR",8)) {
data_offset = hlen;
parse_ciff (hlen, flen-hlen, 0);
load_raw = &CLASS canon_load_raw;
} else if (parse_tiff(0)) apply_tiff();
} else if (!memcmp (head,"\xff\xd8\xff\xe1",4) &&
!memcmp (head+6,"Exif",4)) {
fseek (ifp, 4, SEEK_SET);
data_offset = 4 + get2();
fseek (ifp, data_offset, SEEK_SET);
if (fgetc(ifp) != 0xff)
parse_tiff(12);
thumb_offset = 0;
} else if (!memcmp (head+25,"ARECOYK",7)) {
strcpy (make, "Contax");
strcpy (model,"N Digital");
fseek (ifp, 33, SEEK_SET);
get_timestamp(1);
fseek (ifp, 60, SEEK_SET);
FORC4 cam_mul[c ^ (c >> 1)] = get4();
} else if (!strcmp (head, "PXN")) {
strcpy (make, "Logitech");
strcpy (model,"Fotoman Pixtura");
} else if (!strcmp (head, "qktk")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 100");
load_raw = &CLASS quicktake_100_load_raw;
} else if (!strcmp (head, "qktn")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 150");
load_raw = &CLASS kodak_radc_load_raw;
} else if (!memcmp (head,"FUJIFILM",8)) {
fseek (ifp, 84, SEEK_SET);
thumb_offset = get4();
thumb_length = get4();
fseek (ifp, 92, SEEK_SET);
parse_fuji (get4());
if (thumb_offset > 120) {
fseek (ifp, 120, SEEK_SET);
is_raw += (i = get4()) && 1;
if (is_raw == 2 && shot_select)
parse_fuji (i);
}
fseek (ifp, 100+28*(shot_select > 0), SEEK_SET);
parse_tiff (data_offset = get4());
parse_tiff (thumb_offset+12);
apply_tiff();
if (!load_raw) {
load_raw = &CLASS unpacked_load_raw;
tiff_bps = 14;
}
} else if (!memcmp (head,"RIFF",4)) {
fseek (ifp, 0, SEEK_SET);
parse_riff();
} else if (!memcmp (head+4,"ftypcrx ",8)) {
fseek (ifp, 0, SEEK_SET);
parse_crx (fsize);
} else if (!memcmp (head+4,"ftypqt ",9)) {
fseek (ifp, 0, SEEK_SET);
parse_qt (fsize);
is_raw = 0;
} else if (!memcmp (head,"\0\001\0\001\0@",6)) {
fseek (ifp, 6, SEEK_SET);
fread (make, 1, 8, ifp);
fread (model, 1, 8, ifp);
fread (model2, 1, 16, ifp);
data_offset = get2();
get2();
raw_width = get2();
raw_height = get2();
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
} else if (!memcmp (head,"NOKIARAW",8)) {
strcpy (make, "NOKIA");
order = 0x4949;
fseek (ifp, 300, SEEK_SET);
data_offset = get4();
i = get4();
width = get2();
height = get2();
switch (tiff_bps = i*8 / (width * height)) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 10: load_raw = &CLASS nokia_load_raw;
}
raw_height = height + (top_margin = i / (width * tiff_bps/8) - height);
mask[0][3] = 1;
filters = 0x61616161;
} else if (!memcmp (head,"ARRI",4)) {
order = 0x4949;
fseek (ifp, 20, SEEK_SET);
width = get4();
height = get4();
strcpy (make, "ARRI");
fseek (ifp, 668, SEEK_SET);
fread (model, 1, 64, ifp);
data_offset = 4096;
load_raw = &CLASS packed_load_raw;
load_flags = 88;
filters = 0x61616161;
} else if (!memcmp (head,"XPDS",4)) {
order = 0x4949;
fseek (ifp, 0x800, SEEK_SET);
fread (make, 1, 41, ifp);
raw_height = get2();
raw_width = get2();
fseek (ifp, 56, SEEK_CUR);
fread (model, 1, 30, ifp);
data_offset = 0x10000;
load_raw = &CLASS canon_rmf_load_raw;
gamma_curve (0, 12.25, 1, 1023);
} else if (!memcmp (head+4,"RED1",4)) {
strcpy (make, "Red");
strcpy (model,"One");
parse_redcine();
load_raw = &CLASS redcine_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
filters = 0x49494949;
} else if (!memcmp (head,"DSC-Image",9))
parse_rollei();
else if (!memcmp (head,"PWAD",4))
parse_sinar_ia();
else if (!memcmp (head,"\0MRM",4))
parse_minolta(0);
else if (!memcmp (head,"FOVb",4))
parse_foveon();
else if (!memcmp (head,"CI",2))
parse_cine();
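/* No maker found in the metadata: fall back to matching the exact file
   size against the table above. */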
if (make[0] == 0)
for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++)
if (fsize == table[i].fsize) {
strcpy (make, table[i].make );
strcpy (model, table[i].model);
flip = table[i].flags >> 2;
zero_is_bad = table[i].flags & 2;
if (table[i].flags & 1)
parse_external_jpeg();
data_offset = table[i].offset;
raw_width = table[i].rw;
raw_height = table[i].rh;
left_margin = table[i].lm;
top_margin = table[i].tm;
width = raw_width - left_margin - table[i].rm;
height = raw_height - top_margin - table[i].bm;
filters = 0x1010101 * table[i].cf;
colors = 4 - !((filters & filters >> 1) & 0x5555);
load_flags = table[i].lf;
switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) {
case 6:
load_raw = &CLASS minolta_rd175_load_raw; break;
case 8:
load_raw = &CLASS eight_bit_load_raw; break;
case 10: case 12:
load_flags |= 512;
if (!strcmp(make,"Canon")) load_flags |= 256;
load_raw = &CLASS packed_load_raw; break;
case 16:
order = 0x4949 | 0x404 * (load_flags & 1);
tiff_bps -= load_flags >> 4;
tiff_bps -= load_flags = load_flags >> 1 & 7;
load_raw = &CLASS unpacked_load_raw;
}
maximum = (1 << tiff_bps) - (1 << table[i].max);
}
if (zero_fsize) fsize = 0;
if (make[0] == 0) parse_smal (0, flen);
if (make[0] == 0) {
parse_jpeg(0);
if (!(strncmp(model,"ov",2) && strncmp(model,"RP_OV",5)) &&
!fseek (ifp, -6404096, SEEK_END) &&
fread (head, 1, 32, ifp) && !strcmp(head,"BRCMn")) {
strcpy (make, "OmniVision");
data_offset = ftell(ifp) + 0x8000-32;
width = raw_width;
raw_width = 2611;
load_raw = &CLASS nokia_load_raw;
filters = 0x16161616;
} else is_raw = 0;
}
for (i=0; i < sizeof corp / sizeof *corp; i++)
if (strcasestr (make, corp[i])) /* Simplify company names */
strcpy (make, corp[i]);
if ((!strcmp(make,"Kodak") || !strcmp(make,"Leica")) &&
((cp = strcasestr(model," DIGITAL CAMERA")) ||
(cp = strstr(model,"FILE VERSION"))))
*cp = 0;
if (!strncasecmp(model,"PENTAX",6))
strcpy (make, "Pentax");
cp = make + strlen(make); /* Remove trailing spaces */
while (*--cp == ' ') *cp = 0;
cp = model + strlen(model);
while (*--cp == ' ') *cp = 0;
i = strlen(make); /* Remove make from model */
if (!strncasecmp (model, make, i) && model[i++] == ' ')
memmove (model, model+i, 64-i);
if (!strncmp (model,"FinePix ",8))
strcpy (model, model+8);
if (!strncmp (model,"Digital Camera ",15))
strcpy (model, model+15);
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
if (!is_raw) goto notraw;
if (!height) height = raw_height;
if (!width) width = raw_width;
if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */
{ height = 2616; width = 3896; }
if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */
{ height = 3124; width = 4688; filters = 0x16161616; }
if (raw_height == 2868 && (!strcmp(model,"K-r") || !strcmp(model,"K-x")))
{ width = 4309; filters = 0x16161616; }
if (raw_height == 3136 && !strcmp(model,"K-7"))
{ height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; }
if (raw_height == 3284 && !strncmp(model,"K-5",3))
{ left_margin = 10; width = 4950; filters = 0x16161616; }
if (raw_height == 3300 && !strncmp(model,"K-50",4))
{ height = 3288, width = 4952; left_margin = 0; top_margin = 12; }
if (raw_height == 3664 && !strncmp(model,"K-S",3))
{ width = 5492; left_margin = 0; }
if (raw_height == 4032 && !strcmp(model,"K-3"))
{ height = 4032; width = 6040; left_margin = 4; }
if (raw_height == 4060 && !strcmp(model,"KP"))
{ height = 4032; width = 6032; left_margin = 52; top_margin = 28; }
if (raw_height == 4950 && !strcmp(model,"K-1"))
{ height = 4932; width = 7380; left_margin = 4; top_margin = 18; }
if (raw_height == 5552 && !strcmp(model,"645D"))
{ height = 5502; width = 7328; left_margin = 48; top_margin = 29;
filters = 0x61616161; }
if (height == 3014 && width == 4096) /* Ricoh GX200 */
width = 4014;
if (dng_version) {
if (filters == UINT_MAX) filters = 0;
if (filters) is_raw *= tiff_samples;
else colors = tiff_samples;
switch (tiff_compress) {
case 0:
case 1: load_raw = &CLASS packed_dng_load_raw; break;
case 7: load_raw = &CLASS lossless_dng_load_raw; break;
case 34892: load_raw = &CLASS lossy_dng_load_raw; break;
default: load_raw = 0;
}
goto dng_skip;
}
if (!strcmp(make,"Canon") && !fsize && tiff_bps != 15) {
if (!load_raw)
load_raw = &CLASS lossless_jpeg_load_raw;
for (i=0; i < sizeof canon / sizeof *canon; i++)
if (raw_width == canon[i][0] && raw_height == canon[i][1]) {
width = raw_width - (left_margin = canon[i][2]);
height = raw_height - (top_margin = canon[i][3]);
width -= canon[i][4];
height -= canon[i][5];
mask[0][1] = canon[i][6];
mask[0][3] = -canon[i][7];
mask[1][1] = canon[i][8];
mask[1][3] = -canon[i][9];
if (canon[i][10]) filters = canon[i][10] * 0x01010101;
}
if ((unique_id | 0x20000) == 0x2720000) {
left_margin = 8;
top_margin = 16;
}
}
for (i=0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id) {
adobe_coeff ("Canon", unique[i].model);
if (model[4] == 'K' && strlen(model) == 8)
strcpy (model, unique[i].model);
}
for (i=0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
strcpy (model, sonique[i].model);
for (i=0; i < sizeof panalias / sizeof *panalias; i++)
if (panalias[i][0] == '@') orig = panalias[i]+1;
else if (!strcmp(model,panalias[i]))
adobe_coeff ("Panasonic", orig);
if (!strcmp(make,"Nikon")) {
if (!load_raw)
load_raw = &CLASS packed_load_raw;
if (model[0] == 'E')
load_flags |= !data_offset << 2 | 2;
}
/* Set parameters based on camera name (for non-DNG files). */
if (!strcmp(model,"KAI-0340")
&& find_green (16, 16, 3840, 5120) < 25) {
height = 480;
top_margin = filters = 0;
strcpy (model,"C603");
}
if (!strcmp(make,"Sony") && raw_width > 3888)
black = 128 << (tiff_bps - 12);
if (is_foveon) {
if (height*2 < width) pixel_aspect = 0.5;
if (height > width) pixel_aspect = 2;
filters = 0;
simple_coeff(0);
} else if (!strcmp(make,"Canon") && tiff_bps == 15) {
switch (width) {
case 3344: width -= 66;
case 3872: width -= 6;
}
if (height > width) {
SWAP(height,width);
SWAP(raw_height,raw_width);
}
if (width == 7200 && height == 3888) {
raw_width = width = 6480;
raw_height = height = 4320;
}
filters = 0;
tiff_samples = colors = 3;
load_raw = &CLASS canon_sraw_load_raw;
} else if (!strcmp(model,"PowerShot 600")) {
height = 613;
width = 854;
raw_width = 896;
colors = 4;
filters = 0xe1e4e1e4;
load_raw = &CLASS canon_600_load_raw;
} else if (!strcmp(model,"PowerShot A5") ||
!strcmp(model,"PowerShot A5 Zoom")) {
height = 773;
width = 960;
raw_width = 992;
pixel_aspect = 256/235.0;
filters = 0x1e4e1e4e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot A50")) {
height = 968;
width = 1290;
raw_width = 1320;
filters = 0x1b4e4b1e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot Pro70")) {
height = 1024;
width = 1552;
filters = 0x1e4b4e1b;
canon_a5:
colors = 4;
tiff_bps = 10;
load_raw = &CLASS packed_load_raw;
load_flags = 264;
} else if (!strcmp(model,"PowerShot Pro90 IS") ||
!strcmp(model,"PowerShot G1")) {
colors = 4;
filters = 0xb4b4b4b4;
} else if (!strcmp(model,"PowerShot A610")) {
if (canon_s2is()) strcpy (model+10, "S2 IS");
} else if (!strcmp(model,"PowerShot SX220 HS")) {
mask[1][3] = -4;
} else if (!strcmp(model,"EOS D2000C")) {
filters = 0x61616161;
black = curve[200];
} else if (!strcmp(model,"EOS 80D")) {
top_margin -= 2;
height += 2;
} else if (!strcmp(model,"D1")) {
cam_mul[0] *= 256/527.0;
cam_mul[2] *= 256/317.0;
} else if (!strcmp(model,"D1X")) {
width -= 4;
pixel_aspect = 0.5;
} else if (!strcmp(model,"D40X") ||
!strcmp(model,"D60") ||
!strcmp(model,"D80") ||
!strcmp(model,"D3000")) {
height -= 3;
width -= 4;
} else if (!strcmp(model,"D3") ||
!strcmp(model,"D3S") ||
!strcmp(model,"D700")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"D3100")) {
width -= 28;
left_margin = 6;
} else if (!strcmp(model,"D5000") ||
!strcmp(model,"D90")) {
width -= 42;
} else if (!strcmp(model,"D5100") ||
!strcmp(model,"D7000") ||
!strcmp(model,"COOLPIX A")) {
width -= 44;
} else if (!strcmp(model,"D3200") ||
!strncmp(model,"D6",2) ||
!strncmp(model,"D800",4)) {
width -= 46;
} else if (!strcmp(model,"D4") ||
!strcmp(model,"Df")) {
width -= 52;
left_margin = 2;
} else if (!strncmp(model,"D40",3) ||
!strncmp(model,"D50",3) ||
!strncmp(model,"D70",3)) {
width--;
} else if (!strcmp(model,"D100")) {
if (load_flags)
raw_width = (width += 3) + 3;
} else if (!strcmp(model,"D200")) {
left_margin = 1;
width -= 4;
filters = 0x94949494;
} else if (!strncmp(model,"D2H",3)) {
left_margin = 6;
width -= 14;
} else if (!strncmp(model,"D2X",3)) {
if (width == 3264) width -= 32;
else width -= 8;
} else if (!strncmp(model,"D300",4)) {
width -= 32;
} else if (!strncmp(model,"COOLPIX B",9)) {
load_flags = 24;
} else if (!strncmp(model,"COOLPIX P",9) && raw_width != 4032) {
load_flags = 24;
filters = 0x94949494;
if (model[9] == '7' && iso_speed >= 400)
black = 255;
} else if (!strncmp(model,"1 ",2)) {
height -= 2;
} else if (fsize == 1581060) {
simple_coeff(3);
pre_mul[0] = 1.2085;
pre_mul[1] = 1.0943;
pre_mul[3] = 1.1103;
} else if (fsize == 3178560) {
cam_mul[0] *= 4;
cam_mul[2] *= 4;
} else if (fsize == 4771840) {
if (!timestamp && nikon_e995())
strcpy (model, "E995");
if (strcmp(model,"E995")) {
filters = 0xb4b4b4b4;
simple_coeff(3);
pre_mul[0] = 1.196;
pre_mul[1] = 1.246;
pre_mul[2] = 1.018;
}
} else if (fsize == 2940928) {
if (!timestamp && !nikon_e2100())
strcpy (model,"E2500");
if (!strcmp(model,"E2500")) {
height -= 2;
load_flags = 6;
colors = 4;
filters = 0x4b4b4b4b;
}
} else if (fsize == 4775936) {
if (!timestamp) nikon_3700();
if (model[0] == 'E' && atoi(model+1) < 3700)
filters = 0x49494949;
if (!strcmp(model,"Optio 33WR")) {
flip = 1;
filters = 0x16161616;
}
if (make[0] == 'O') {
i = find_green (12, 32, 1188864, 3576832);
c = find_green (12, 32, 2383920, 2387016);
if (abs(i) < abs(c)) {
SWAP(i,c);
load_flags = 24;
}
if (i < 0) filters = 0x61616161;
}
} else if (fsize == 5869568) {
if (!timestamp && minolta_z2()) {
strcpy (make, "Minolta");
strcpy (model,"DiMAGE Z2");
}
load_flags = 6 + 24*(make[0] == 'M');
} else if (fsize == 6291456) {
fseek (ifp, 0x300000, SEEK_SET);
if ((order = guess_byte_order(0x10000)) == 0x4d4d) {
height -= (top_margin = 16);
width -= (left_margin = 28);
maximum = 0xf5c0;
strcpy (make, "ISG");
model[0] = 0;
}
} else if (!strcmp(make,"Fujifilm")) {
if (!strcmp(model+7,"S2Pro")) {
strcpy (model,"S2Pro");
height = 2144;
width = 2880;
flip = 6;
}
top_margin = (raw_height - height) >> 2 << 1;
left_margin = (raw_width - width ) >> 2 << 1;
if (width == 2848 || width == 3664) filters = 0x16161616;
if (width == 4032 || width == 4952 || width == 6032 || width == 8280) left_margin = 0;
if (width == 3328 && (width -= 66)) left_margin = 34;
if (width == 4936) left_margin = 4;
if (!strcmp(model,"HS50EXR") ||
!strcmp(model,"F900EXR")) {
width += 2;
left_margin = 0;
filters = 0x16161616;
}
if (fuji_layout) raw_width *= is_raw;
if (filters == 9)
FORC(36) ((char *)xtrans)[c] =
xtrans_abs[(c/6+top_margin) % 6][(c+left_margin) % 6];
} else if (!strcmp(model,"KD-400Z")) {
height = 1712;
width = 2312;
raw_width = 2336;
goto konica_400z;
} else if (!strcmp(model,"KD-510Z")) {
goto konica_510z;
} else if (!strcasecmp(make,"Minolta")) {
if (!load_raw && (maximum = 0xfff))
load_raw = &CLASS unpacked_load_raw;
if (!strncmp(model,"DiMAGE A",8)) {
if (!strcmp(model,"DiMAGE A200"))
filters = 0x49494949;
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"ALPHA",5) ||
!strncmp(model,"DYNAX",5) ||
!strncmp(model,"MAXXUM",6)) {
sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M'));
adobe_coeff (make, model+20);
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"DiMAGE G",8)) {
if (model[8] == '4') {
height = 1716;
width = 2304;
} else if (model[8] == '5') {
konica_510z:
height = 1956;
width = 2607;
raw_width = 2624;
} else if (model[8] == '6') {
height = 2136;
width = 2848;
}
data_offset += 14;
filters = 0x61616161;
konica_400z:
load_raw = &CLASS unpacked_load_raw;
maximum = 0x3df;
order = 0x4d4d;
}
} else if (!strcmp(model,"*ist D")) {
load_raw = &CLASS unpacked_load_raw;
data_error = -1;
} else if (!strcmp(model,"*ist DS")) {
height -= 2;
} else if (!strcmp(make,"Samsung") && raw_width == 4704) {
height -= top_margin = 8;
width -= 2 * (left_margin = 8);
load_flags = 256;
} else if (!strcmp(make,"Samsung") && raw_height == 3714) {
height -= top_margin = 18;
left_margin = raw_width - (width = 5536);
if (raw_width != 5600)
left_margin = top_margin = 0;
filters = 0x61616161;
colors = 3;
} else if (!strcmp(make,"Samsung") && raw_width == 5632) {
order = 0x4949;
height = 3694;
top_margin = 2;
width = 5574 - (left_margin = 32 + tiff_bps);
if (tiff_bps == 12) load_flags = 80;
} else if (!strcmp(make,"Samsung") && raw_width == 5664) {
height -= top_margin = 17;
left_margin = 96;
width = 5544;
filters = 0x49494949;
} else if (!strcmp(make,"Samsung") && raw_width == 6496) {
filters = 0x61616161;
black = 1 << (tiff_bps - 7);
} else if (!strcmp(model,"EX1")) {
order = 0x4949;
height -= 20;
top_margin = 2;
if ((width -= 6) > 3682) {
height -= 10;
width -= 46;
top_margin = 8;
}
} else if (!strcmp(model,"WB2000")) {
order = 0x4949;
height -= 3;
top_margin = 2;
if ((width -= 10) > 3718) {
height -= 28;
width -= 56;
top_margin = 8;
}
} else if (strstr(model,"WB550")) {
strcpy (model, "WB550");
} else if (!strcmp(model,"EX2F")) {
height = 3045;
width = 4070;
top_margin = 3;
order = 0x4949;
filters = 0x49494949;
load_raw = &CLASS unpacked_load_raw;
} else if (!strcmp(model,"STV680 VGA")) {
black = 16;
} else if (!strcmp(model,"N95")) {
height = raw_height - (top_margin = 2);
} else if (!strcmp(model,"640x480")) {
gamma_curve (0.45, 4.5, 1, 255);
} else if (!strcmp(make,"Hasselblad")) {
if (load_raw == &CLASS lossless_jpeg_load_raw)
load_raw = &CLASS hasselblad_load_raw;
if (raw_width == 7262) {
height = 5444;
width = 7248;
top_margin = 4;
left_margin = 7;
filters = 0x61616161;
} else if (raw_width == 7410 || raw_width == 8282) {
height -= 84;
width -= 82;
top_margin = 4;
left_margin = 41;
filters = 0x61616161;
} else if (raw_width == 8384) {
height = 6208;
width = 8280;
top_margin = 96;
left_margin = 46;
} else if (raw_width == 9044) {
height = 6716;
width = 8964;
top_margin = 8;
left_margin = 40;
black += load_flags = 256;
maximum = 0x8101;
} else if (raw_width == 4090) {
strcpy (model, "V96C");
height -= (top_margin = 6);
width -= (left_margin = 3) + 7;
filters = 0x61616161;
}
if (tiff_samples > 1) {
is_raw = tiff_samples+1;
if (!shot_select && !half_size) filters = 0;
}
} else if (!strcmp(make,"Sinar")) {
if (!load_raw) load_raw = &CLASS unpacked_load_raw;
if (is_raw > 1 && !shot_select && !half_size) filters = 0;
maximum = 0x3fff;
} else if (!strcmp(make,"Leaf")) {
maximum = 0x3fff;
fseek (ifp, data_offset, SEEK_SET);
if (ljpeg_start (&jh, 1) && jh.bits == 15)
maximum = 0x1fff;
if (tiff_samples > 1) filters = 0;
if (tiff_samples > 1 || tile_length < raw_height) {
load_raw = &CLASS leaf_hdr_load_raw;
raw_width = tile_width;
}
if ((width | height) == 2048) {
if (tiff_samples == 1) {
filters = 1;
strcpy (cdesc, "RBTG");
strcpy (model, "CatchLight");
top_margin = 8; left_margin = 18; height = 2032; width = 2016;
} else {
strcpy (model, "DCB2");
top_margin = 10; left_margin = 16; height = 2028; width = 2022;
}
} else if (width+height == 3144+2060) {
if (!model[0]) strcpy (model, "Cantare");
if (width > height) {
top_margin = 6; left_margin = 32; height = 2048; width = 3072;
filters = 0x61616161;
} else {
left_margin = 6; top_margin = 32; width = 2048; height = 3072;
filters = 0x16161616;
}
if (!cam_mul[0] || model[0] == 'V') filters = 0;
else is_raw = tiff_samples;
} else if (width == 2116) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 30);
width -= 2 * (left_margin = 55);
filters = 0x49494949;
} else if (width == 3171) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 24);
width -= 2 * (left_margin = 24);
filters = 0x16161616;
}
} else if (!strcmp(make,"Leica") || !strcmp(make,"Panasonic")) {
if ((flen - data_offset) / (raw_width*8/7) == raw_height)
load_raw = &CLASS panasonic_load_raw;
if (!load_raw) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
}
zero_is_bad = 1;
if ((height += 12) > raw_height) height = raw_height;
for (i=0; i < sizeof pana / sizeof *pana; i++)
if (raw_width == pana[i][0] && raw_height == pana[i][1]) {
left_margin = pana[i][2];
top_margin = pana[i][3];
width += pana[i][4];
height += pana[i][5];
}
filters = 0x01010101 * (uchar) "\x94\x61\x49\x16"
[((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3];
} else if (!strcmp(model,"C770UZ")) {
height = 1718;
width = 2304;
filters = 0x16161616;
load_raw = &CLASS packed_load_raw;
load_flags = 30;
} else if (!strcmp(make,"Olympus")) {
height += height & 1;
if (exif_cfa) filters = exif_cfa;
if (width == 4100) width -= 4;
if (width == 4080) width -= 24;
if (width == 9280) { width -= 6; height -= 6; }
if (load_raw == &CLASS unpacked_load_raw)
load_flags = 4;
tiff_bps = 12;
if (!strcmp(model,"E-300") ||
!strcmp(model,"E-500")) {
width -= 20;
if (load_raw == &CLASS unpacked_load_raw) {
maximum = 0xfc3;
memset (cblack, 0, sizeof cblack);
}
} else if (!strcmp(model,"E-330")) {
width -= 30;
if (load_raw == &CLASS unpacked_load_raw)
maximum = 0xf79;
} else if (!strcmp(model,"SP550UZ")) {
thumb_length = flen - (thumb_offset = 0xa39800);
thumb_height = 480;
thumb_width = 640;
} else if (!strcmp(model,"TG-4")) {
width -= 16;
} else if (!strcmp(model,"TG-5")) {
width -= 6;
}
} else if (!strcmp(model,"N Digital")) {
height = 2047;
width = 3072;
filters = 0x61616161;
data_offset = 0x1a00;
load_raw = &CLASS packed_load_raw;
} else if (!strcmp(model,"DSC-F828")) {
width = 3288;
left_margin = 5;
mask[1][3] = -17;
data_offset = 862144;
load_raw = &CLASS sony_load_raw;
filters = 0x9c9c9c9c;
colors = 4;
strcpy (cdesc, "RGBE");
} else if (!strcmp(model,"DSC-V3")) {
width = 3109;
left_margin = 59;
mask[0][1] = 9;
data_offset = 787392;
load_raw = &CLASS sony_load_raw;
} else if (!strcmp(make,"Sony") && raw_width == 3984) {
width = 3925;
order = 0x4d4d;
} else if (!strcmp(make,"Sony") && raw_width == 4288) {
width -= 32;
} else if (!strcmp(make,"Sony") && raw_width == 4600) {
if (!strcmp(model,"DSLR-A350"))
height -= 4;
black = 0;
} else if (!strcmp(make,"Sony") && raw_width == 4928) {
if (height < 3280) width -= 8;
} else if (!strcmp(make,"Sony") && raw_width == 5504) {
width -= height > 3664 ? 8 : 32;
if (!strncmp(model,"DSC",3))
black = 200 << (tiff_bps - 12);
} else if (!strcmp(make,"Sony") && raw_width == 6048) {
width -= 24;
if (strstr(model,"RX1") || strstr(model,"A99"))
width -= 6;
} else if (!strcmp(make,"Sony") && raw_width == 7392) {
width -= 30;
} else if (!strcmp(make,"Sony") && raw_width == 8000) {
width -= 32;
} else if (!strcmp(model,"DSLR-A100")) {
if (width == 3880) {
height--;
width = ++raw_width;
} else {
height -= 4;
width -= 4;
order = 0x4d4d;
load_flags = 2;
}
filters = 0x61616161;
} else if (!strcmp(model,"PIXL")) {
height -= top_margin = 4;
width -= left_margin = 32;
gamma_curve (0, 7, 1, 255);
} else if (!strcmp(model,"C603") || !strcmp(model,"C330")
|| !strcmp(model,"12MP")) {
order = 0x4949;
if (filters && data_offset) {
fseek (ifp, data_offset < 4096 ? 168 : 5252, SEEK_SET);
read_shorts (curve, 256);
} else gamma_curve (0, 3.875, 1, 255);
load_raw = filters ? &CLASS eight_bit_load_raw :
strcmp(model,"C330") ? &CLASS kodak_c603_load_raw :
&CLASS kodak_c330_load_raw;
load_flags = tiff_bps > 16;
tiff_bps = 8;
} else if (!strncasecmp(model,"EasyShare",9)) {
data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000;
load_raw = &CLASS packed_load_raw;
} else if (!strcasecmp(make,"Kodak")) {
if (filters == UINT_MAX) filters = 0x61616161;
if (!strncmp(model,"NC2000",6) ||
!strncmp(model,"EOSDCS",6) ||
!strncmp(model,"DCS4",4)) {
width -= 4;
left_margin = 2;
if (model[6] == ' ') model[6] = 0;
if (!strcmp(model,"DCS460A")) goto bw;
} else if (!strcmp(model,"DCS660M")) {
black = 214;
goto bw;
} else if (!strcmp(model,"DCS760M")) {
bw: colors = 1;
filters = 0;
}
if (!strcmp(model+4,"20X"))
strcpy (cdesc, "MYCY");
if (strstr(model,"DC25")) {
strcpy (model, "DC25");
data_offset = 15424;
}
if (!strncmp(model,"DC2",3)) {
raw_height = 2 + (height = 242);
if (flen < 100000) {
raw_width = 256; width = 249;
pixel_aspect = (4.0*height) / (3.0*width);
} else {
raw_width = 512; width = 501;
pixel_aspect = (493.0*height) / (373.0*width);
}
top_margin = left_margin = 1;
colors = 4;
filters = 0x8d8d8d8d;
simple_coeff(1);
pre_mul[1] = 1.179;
pre_mul[2] = 1.209;
pre_mul[3] = 1.036;
load_raw = &CLASS eight_bit_load_raw;
} else if (!strcmp(model,"40")) {
strcpy (model, "DC40");
height = 512;
width = 768;
data_offset = 1152;
load_raw = &CLASS kodak_radc_load_raw;
tiff_bps = 12;
} else if (strstr(model,"DC50")) {
strcpy (model, "DC50");
height = 512;
width = 768;
data_offset = 19712;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC120")) {
strcpy (model, "DC120");
height = 976;
width = 848;
pixel_aspect = height/0.75/width;
load_raw = tiff_compress == 7 ?
&CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw;
} else if (!strcmp(model,"DCS200")) {
thumb_height = 128;
thumb_width = 192;
thumb_offset = 6144;
thumb_misc = 360;
write_thumb = &CLASS layer_thumb;
black = 17;
}
} else if (!strcmp(model,"Fotoman Pixtura")) {
height = 512;
width = 768;
data_offset = 3632;
load_raw = &CLASS kodak_radc_load_raw;
filters = 0x61616161;
simple_coeff(2);
} else if (!strncmp(model,"QuickTake",9)) {
if (head[5]) strcpy (model+10, "200");
fseek (ifp, 544, SEEK_SET);
height = get2();
width = get2();
data_offset = (get4(),get2()) == 30 ? 738:736;
if (height > width) {
SWAP(height,width);
fseek (ifp, data_offset-6, SEEK_SET);
flip = ~get2() & 3 ? 5:6;
}
filters = 0x61616161;
} else if (!strcmp(make,"Rollei") && !load_raw) {
switch (raw_width) {
case 1316:
height = 1030;
width = 1300;
top_margin = 1;
left_margin = 6;
break;
case 2568:
height = 1960;
width = 2560;
top_margin = 2;
left_margin = 8;
}
filters = 0x16161616;
load_raw = &CLASS rollei_load_raw;
}
if (!model[0])
sprintf (model, "%dx%d", width, height);
if (filters == UINT_MAX) filters = 0x94949494;
if (thumb_offset && !thumb_height) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
dng_skip:
if ((use_camera_matrix & (use_camera_wb || dng_version))
&& cmatrix[0][0] > 0.125) {
memcpy (rgb_cam, cmatrix, sizeof cmatrix);
raw_color = 0;
}
if (raw_color) adobe_coeff (make, model);
if (load_raw == &CLASS kodak_radc_load_raw)
if (raw_color) adobe_coeff ("Apple","Quicktake");
if (fuji_width) {
fuji_width = width >> !fuji_layout;
filters = fuji_width & 1 ? 0x94949494 : 0x49494949;
width = (height >> fuji_layout) + fuji_width;
height = width - 1;
pixel_aspect = 1;
} else {
if (raw_height < height) raw_height = height;
if (raw_width < width ) raw_width = width;
}
if (!tiff_bps) tiff_bps = 12;
if (!maximum) maximum = (1 << tiff_bps) - 1;
if (!load_raw || height < 22 || width < 22 ||
tiff_bps > 16 || tiff_samples > 6 || colors > 4)
is_raw = 0;
#ifdef NO_JASPER
if (load_raw == &CLASS redcine_load_raw) {
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjasper");
is_raw = 0;
}
#endif
#ifdef NO_JPEG
if (load_raw == &CLASS kodak_jpeg_load_raw ||
load_raw == &CLASS lossy_dng_load_raw) {
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjpeg");
is_raw = 0;
}
#endif
if (!cdesc[0])
strcpy (cdesc, colors == 3 ? "RGBG":"GMCY");
if (!raw_height) raw_height = height;
if (!raw_width ) raw_width = width;
if (filters > 999 && colors == 3)
filters |= ((filters >> 2 & 0x22222222) |
(filters << 2 & 0x88888888)) & filters << 1;
notraw:
if (flip == UINT_MAX) flip = tiff_flip;
if (flip == UINT_MAX) flip = 0;
}
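/*
   Use LittleCMS to transform the image from an input ICC profile (a file,
   or the profile embedded in the raw) to an output profile, sRGB when no
   output file is given. A successful transform sets raw_color so that
   rgb_cam is not applied again.
 */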
#ifndef NO_LCMS
void CLASS apply_profile (const char *input, const char *output)
{
char *prof;
cmsHPROFILE hInProfile=0, hOutProfile=0;
cmsHTRANSFORM hTransform;
FILE *fp;
unsigned size;
if (strcmp (input, "embed"))
hInProfile = cmsOpenProfileFromFile (input, "r");
else if (profile_length) {
prof = (char *) malloc (profile_length);
merror (prof, "apply_profile()");
fseek (ifp, profile_offset, SEEK_SET);
fread (prof, 1, profile_length, ifp);
hInProfile = cmsOpenProfileFromMem (prof, profile_length);
free (prof);
} else
fprintf (stderr,_("%s has no embedded profile.\n"), ifname);
if (!hInProfile) return;
if (!output)
hOutProfile = cmsCreate_sRGBProfile();
else if ((fp = fopen (output, "rb"))) {
fread (&size, 4, 1, fp);
fseek (fp, 0, SEEK_SET);
oprof = (unsigned *) malloc (size = ntohl(size));
merror (oprof, "apply_profile()");
fread (oprof, 1, size, fp);
fclose (fp);
if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) {
free (oprof);
oprof = 0;
}
} else
fprintf (stderr,_("Cannot open file %s!\n"), output);
if (!hOutProfile) goto quit;
if (verbose)
fprintf (stderr,_("Applying color profile...\n"));
hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16,
hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0);
cmsDoTransform (hTransform, image, image, width*height);
raw_color = 1; /* Don't use rgb_cam with a profile */
cmsDeleteTransform (hTransform);
cmsCloseProfile (hOutProfile);
quit:
cmsCloseProfile (hInProfile);
}
#endif
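/*
   Compose the camera-to-output matrix out_cam from the chosen output
   primaries and rgb_cam, build a matching ICC profile in oprof, convert
   every pixel to the selected output colorspace, and accumulate the
   per-channel histograms.
 */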
void CLASS convert_to_rgb()
{
int row, col, c, i, j, k;
ushort *img;
float out[3], out_cam[3][4];
double num, inverse[3][3];
static const double xyzd50_srgb[3][3] =
{ { 0.436083, 0.385083, 0.143055 },
{ 0.222507, 0.716888, 0.060608 },
{ 0.013930, 0.097097, 0.714022 } };
static const double rgb_rgb[3][3] =
{ { 1,0,0 }, { 0,1,0 }, { 0,0,1 } };
static const double adobe_rgb[3][3] =
{ { 0.715146, 0.284856, 0.000000 },
{ 0.000000, 1.000000, 0.000000 },
{ 0.000000, 0.041166, 0.958839 } };
static const double wide_rgb[3][3] =
{ { 0.593087, 0.404710, 0.002206 },
{ 0.095413, 0.843149, 0.061439 },
{ 0.011621, 0.069091, 0.919288 } };
static const double prophoto_rgb[3][3] =
{ { 0.529317, 0.330092, 0.140588 },
{ 0.098368, 0.873465, 0.028169 },
{ 0.016879, 0.117663, 0.865457 } };
static const double aces_rgb[3][3] =
{ { 0.432996, 0.375380, 0.189317 },
{ 0.089427, 0.816523, 0.102989 },
{ 0.019165, 0.118150, 0.941914 } };
static const double (*out_rgb[])[3] =
{ rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb, aces_rgb };
static const char *name[] =
{ "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ", "ACES" };
static const unsigned phead[] =
{ 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0,
0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d };
unsigned pbody[] =
{ 10, 0x63707274, 0, 36, /* cprt */
0x64657363, 0, 40, /* desc */
0x77747074, 0, 20, /* wtpt */
0x626b7074, 0, 20, /* bkpt */
0x72545243, 0, 14, /* rTRC */
0x67545243, 0, 14, /* gTRC */
0x62545243, 0, 14, /* bTRC */
0x7258595a, 0, 20, /* rXYZ */
0x6758595a, 0, 20, /* gXYZ */
0x6258595a, 0, 20 }; /* bXYZ */
static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc };
unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 };
gamma_curve (gamm[0], gamm[1], 0, 0);
memcpy (out_cam, rgb_cam, sizeof out_cam);
raw_color |= colors == 1 || document_mode ||
output_color < 1 || output_color > 6;
if (!raw_color) {
oprof = (unsigned *) calloc (phead[0], 1);
merror (oprof, "convert_to_rgb()");
memcpy (oprof, phead, sizeof phead);
if (output_color == 5) oprof[4] = oprof[5];
oprof[0] = 132 + 12*pbody[0];
for (i=0; i < pbody[0]; i++) {
oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874;
pbody[i*3+2] = oprof[0];
oprof[0] += (pbody[i*3+3] + 3) & -4;
}
memcpy (oprof+32, pbody, sizeof pbody);
oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1;
memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite);
pcurve[3] = (short)(256/gamm[5]+0.5) << 16;
for (i=4; i < 7; i++)
memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve);
pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++) {
for (num = k=0; k < 3; k++)
num += xyzd50_srgb[i][k] * inverse[j][k];
oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5;
}
for (i=0; i < phead[0]/4; i++)
oprof[i] = htonl(oprof[i]);
strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw");
strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (out_cam[i][j] = k=0; k < 3; k++)
out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j];
}
if (verbose)
fprintf (stderr, raw_color ? _("Building histograms...\n") :
_("Converting to %s colorspace...\n"), name[output_color-1]);
memset (histogram, 0, sizeof histogram);
for (img=image[0], row=0; row < height; row++)
for (col=0; col < width; col++, img+=4) {
if (!raw_color) {
out[0] = out[1] = out[2] = 0;
FORCC {
out[0] += out_cam[0][c] * img[c];
out[1] += out_cam[1][c] * img[c];
out[2] += out_cam[2][c] * img[c];
}
FORC3 img[c] = CLIP((int) out[c]);
}
else if (document_mode)
img[0] = img[fcol(row,col)];
FORCC histogram[c][img[c] >> 3]++;
}
if (colors == 4 && output_color) colors = 3;
if (document_mode && filters) colors = 1;
}
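/*
   Fuji Super CCD raws are sampled on a 45-degree-rotated grid; resample
   that diagonal grid onto a normal rectangular one with bilinear
   interpolation.
 */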
void CLASS fuji_rotate()
{
int i, row, col;
double step;
float r, c, fr, fc;
unsigned ur, uc;
ushort wide, high, (*img)[4], (*pix)[4];
if (!fuji_width) return;
if (verbose)
fprintf (stderr,_("Rotating image 45 degrees...\n"));
fuji_width = (fuji_width - 1 + shrink) >> shrink;
step = sqrt(0.5);
wide = fuji_width / step;
high = (height - fuji_width) / step;
img = (ushort (*)[4]) calloc (high, wide*sizeof *img);
merror (img, "fuji_rotate()");
#pragma omp parallel for default(shared) private(row,col,ur,uc,r,c,fr,fc,pix,i)
for (row=0; row < high; row++)
for (col=0; col < wide; col++) {
ur = r = fuji_width + (row-col)*step;
uc = c = (row+col)*step;
if (ur > height-2 || uc > width-2) continue;
fr = r - ur;
fc = c - uc;
pix = image + ur*width + uc;
for (i=0; i < colors; i++)
img[row*wide+col][i] =
(pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) +
(pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr;
}
free (image);
width = wide;
height = high;
image = img;
fuji_width = 0;
}
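/*
   Resample rows or columns with linear interpolation so that cameras with
   non-square pixels (pixel_aspect != 1) come out with square pixels.
 */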
void CLASS stretch()
{
ushort newdim, (*img)[4], *pix0, *pix1;
int row, col, c;
double rc, frac;
if (pixel_aspect == 1) return;
if (verbose) fprintf (stderr,_("Stretching the image...\n"));
if (pixel_aspect < 1) {
newdim = height / pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (width, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c*width];
if (c+1 < height) pix1 += width*4;
for (col=0; col < width; col++, pix0+=4, pix1+=4)
FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
height = newdim;
} else {
newdim = width * pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (height, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c];
if (c+1 < width) pix1 += 4;
for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
width = newdim;
}
free (image);
image = img;
}
int CLASS flip_index (int row, int col)
{
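/* Map an output-image (row,col) back to an index into image[], which keeps
its pre-flip layout of iheight rows by iwidth columns: bit 2 of flip (value 4)
transposes rows and columns, bit 1 (value 2) mirrors vertically, and bit 0
(value 1) mirrors horizontally. */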
if (flip & 4) SWAP(row,col);
if (flip & 2) row = iheight - 1 - row;
if (flip & 1) col = iwidth - 1 - col;
return row * iwidth + col;
}
struct tiff_tag {
ushort tag, type;
int count;
union { char c[4]; short s[2]; int i; } val;
};
struct tiff_hdr {
ushort order, magic;
int ifd;
ushort pad, ntag;
struct tiff_tag tag[23];
int nextifd;
ushort pad2, nexif;
struct tiff_tag exif[4];
ushort pad3, ngps;
struct tiff_tag gpst[10];
short bps[4];
int rat[10];
unsigned gps[26];
char desc[512], make[64], model[64], soft[32], date[20], artist[64];
};
#if defined(__MINGW32__) || defined(__sun) || defined(__APPLE__)
size_t strnlen(const char *str, size_t maxlen) {
const char *end = memchr(str, 0, maxlen);
return end ? (size_t)(end - str) : maxlen;
}
#endif
void CLASS tiff_set (struct tiff_hdr *th, ushort *ntag,
ushort tag, ushort type, int count, int val)
{
struct tiff_tag *tt;
int c;
tt = (struct tiff_tag *)(ntag+1) + (*ntag)++;
tt->val.i = val;
if (type == 1 && count <= 4)
FORC(4) tt->val.c[c] = val >> (c << 3);
else if (type == 2) {
count = strnlen((char *)th + val, count-1) + 1;
if (count <= 4)
FORC(4) tt->val.c[c] = ((char *)th)[val+c];
} else if (type == 3 && count <= 2)
FORC(2) tt->val.s[c] = val >> (c << 4);
tt->count = count;
tt->type = type;
tt->tag = tag;
}
#define TOFF(ptr) ((char *)(&(ptr)) - (char *)th)
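/* TOFF(member) is the byte offset of a tiff_hdr member from the start of the
header; tags whose data does not fit in the 4-byte value field store this
offset instead, pointing at data kept inside the header itself. */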
void CLASS tiff_head (struct tiff_hdr *th, int full)
{
int c, psize=0;
struct tm *t;
memset (th, 0, sizeof *th);
th->order = htonl(0x4d4d4949) >> 16;
th->magic = 42;
th->ifd = 10;
th->rat[0] = th->rat[2] = 300;
th->rat[1] = th->rat[3] = 1;
FORC(6) th->rat[4+c] = 1000000;
th->rat[4] *= shutter;
th->rat[6] *= aperture;
th->rat[8] *= focal_len;
strncpy (th->desc, desc, 512);
strncpy (th->make, make, 64);
strncpy (th->model, model, 64);
strcpy (th->soft, "dcraw v"DCRAW_VERSION);
t = localtime (&timestamp);
sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d",
t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec);
strncpy (th->artist, artist, 64);
if (full) {
tiff_set (th, &th->ntag, 254, 4, 1, 0);
tiff_set (th, &th->ntag, 256, 4, 1, width);
tiff_set (th, &th->ntag, 257, 4, 1, height);
tiff_set (th, &th->ntag, 258, 3, colors, output_bps);
if (colors > 2)
th->tag[th->ntag-1].val.i = TOFF(th->bps);
FORC4 th->bps[c] = output_bps;
tiff_set (th, &th->ntag, 259, 3, 1, 1);
tiff_set (th, &th->ntag, 262, 3, 1, 1 + (colors > 1));
}
tiff_set (th, &th->ntag, 270, 2, 512, TOFF(th->desc));
tiff_set (th, &th->ntag, 271, 2, 64, TOFF(th->make));
tiff_set (th, &th->ntag, 272, 2, 64, TOFF(th->model));
if (full) {
if (oprof) psize = ntohl(oprof[0]);
tiff_set (th, &th->ntag, 273, 4, 1, sizeof *th + psize);
tiff_set (th, &th->ntag, 277, 3, 1, colors);
tiff_set (th, &th->ntag, 278, 4, 1, height);
tiff_set (th, &th->ntag, 279, 4, 1, height*width*colors*output_bps/8);
} else
tiff_set (th, &th->ntag, 274, 3, 1, "12435867"[flip]-'0');
tiff_set (th, &th->ntag, 282, 5, 1, TOFF(th->rat[0]));
tiff_set (th, &th->ntag, 283, 5, 1, TOFF(th->rat[2]));
tiff_set (th, &th->ntag, 284, 3, 1, 1);
tiff_set (th, &th->ntag, 296, 3, 1, 2);
tiff_set (th, &th->ntag, 305, 2, 32, TOFF(th->soft));
tiff_set (th, &th->ntag, 306, 2, 20, TOFF(th->date));
tiff_set (th, &th->ntag, 315, 2, 64, TOFF(th->artist));
tiff_set (th, &th->ntag, 34665, 4, 1, TOFF(th->nexif));
if (psize) tiff_set (th, &th->ntag, 34675, 7, psize, sizeof *th);
tiff_set (th, &th->nexif, 33434, 5, 1, TOFF(th->rat[4]));
tiff_set (th, &th->nexif, 33437, 5, 1, TOFF(th->rat[6]));
tiff_set (th, &th->nexif, 34855, 3, 1, iso_speed);
tiff_set (th, &th->nexif, 37386, 5, 1, TOFF(th->rat[8]));
if (gpsdata[1]) {
tiff_set (th, &th->ntag, 34853, 4, 1, TOFF(th->ngps));
tiff_set (th, &th->ngps, 0, 1, 4, 0x202);
tiff_set (th, &th->ngps, 1, 2, 2, gpsdata[29]);
tiff_set (th, &th->ngps, 2, 5, 3, TOFF(th->gps[0]));
tiff_set (th, &th->ngps, 3, 2, 2, gpsdata[30]);
tiff_set (th, &th->ngps, 4, 5, 3, TOFF(th->gps[6]));
tiff_set (th, &th->ngps, 5, 1, 1, gpsdata[31]);
tiff_set (th, &th->ngps, 6, 5, 1, TOFF(th->gps[18]));
tiff_set (th, &th->ngps, 7, 5, 3, TOFF(th->gps[12]));
tiff_set (th, &th->ngps, 18, 2, 12, TOFF(th->gps[20]));
tiff_set (th, &th->ngps, 29, 2, 12, TOFF(th->gps[23]));
memcpy (th->gps, gpsdata, sizeof th->gps);
}
}
void CLASS jpeg_thumb()
{
char *thumb;
ushort exif[5];
struct tiff_hdr th;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
fputc (0xff, ofp);
fputc (0xd8, ofp);
if (strcmp (thumb+6, "Exif")) {
memcpy (exif, "\xff\xe1  Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, ofp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, ofp);
}
fwrite (thumb+2, 1, thumb_length-2, ofp);
free (thumb);
}
void CLASS write_ppm_tiff()
{
struct tiff_hdr th;
uchar *ppm;
ushort *ppm2;
int c, row, col, soff, rstep, cstep;
int perc, val, total, white=0x2000;
perc = width * height * 0.01; /* 99th percentile white level */
if (fuji_width) perc /= 2;
if (!((highlight & ~2) || no_auto_bright))
for (white=c=0; c < colors; c++) {
for (val=0x2000, total=0; --val > 32; )
if ((total += histogram[c][val]) > perc) break;
if (white < val) white = val;
}
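/* Illustrative example (hypothetical numbers): for a 3000 x 2000 image, perc
is 60000 pixels. For each channel the loop walks val down from 0x2000,
accumulating the histogram from the top, and stops at the bin where the count
exceeds perc (the 99th percentile); the largest such val across channels
becomes the white point passed to gamma_curve(). */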
gamma_curve (gamm[0], gamm[1], 2, (white << 3)/bright);
iheight = height;
iwidth = width;
if (flip & 4) SWAP(height,width);
ppm = (uchar *) calloc (width, colors*output_bps/8);
ppm2 = (ushort *) ppm;
merror (ppm, "write_ppm_tiff()");
if (output_tiff) {
tiff_head (&th, 1);
fwrite (&th, sizeof th, 1, ofp);
if (oprof)
fwrite (oprof, ntohl(oprof[0]), 1, ofp);
} else if (colors > 3)
fprintf (ofp,
"P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
width, height, colors, (1 << output_bps)-1, cdesc);
else
fprintf (ofp, "P%d\n%d %d\n%d\n",
colors/2+5, width, height, (1 << output_bps)-1);
soff = flip_index (0, 0);
cstep = flip_index (0, 1) - soff;
rstep = flip_index (1, 0) - flip_index (0, width);
for (row=0; row < height; row++, soff += rstep) {
for (col=0; col < width; col++, soff += cstep)
if (output_bps == 8)
FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8;
else FORCC ppm2[col*colors+c] = curve[image[soff][c]];
if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa)
swab (ppm2, ppm2, width*colors*2);
fwrite (ppm, colors*output_bps/8, width, ofp);
}
free (ppm);
}
int CLASS main (int argc, const char **argv)
{
int arg, status=0, quality, i, c;
int timestamp_only=0, thumbnail_only=0, identify_only=0;
int user_qual=-1, user_black=-1, user_sat=-1, user_flip=-1;
int use_fuji_rotate=1, write_to_stdout=0, read_from_stdin=0;
const char *sp, *bpfile=0, *dark_frame=0, *write_ext;
char opm, opt, *ofname, *cp;
struct utimbuf ut;
#ifndef NO_LCMS
const char *cam_profile=0, *out_profile=0;
#endif
#ifdef LIGHTZONE
const char *ofbase = 0;
#endif
#ifndef LOCALTIME
putenv ((char *) "TZ=UTC");
#endif
#ifdef LOCALEDIR
setlocale (LC_CTYPE, "");
setlocale (LC_MESSAGES, "");
bindtextdomain ("dcraw", LOCALEDIR);
textdomain ("dcraw");
#endif
if (argc == 1) {
printf(_("\nRaw photo decoder \"dcraw\" v%s"), DCRAW_VERSION);
printf(_("\nby Dave Coffin, dcoffin a cybercom o net\n"));
printf(_("\nUsage: %s [OPTION]... [FILE]...\n\n"), argv[0]);
puts(_("-v Print verbose messages"));
puts(_("-c Write image data to standard output"));
puts(_("-e Extract embedded thumbnail image"));
puts(_("-i Identify files without decoding them"));
puts(_("-i -v Identify files and show metadata"));
puts(_("-z Change file dates to camera timestamp"));
puts(_("-w Use camera white balance, if possible"));
puts(_("-a Average the whole image for white balance"));
puts(_("-A <x y w h> Average a grey box for white balance"));
puts(_("-r <r g b g> Set custom white balance"));
puts(_("+M/-M Use/don't use an embedded color matrix"));
puts(_("-C <r b> Correct chromatic aberration"));
puts(_("-P <file> Fix the dead pixels listed in this file"));
puts(_("-K <file> Subtract dark frame (16-bit raw PGM)"));
puts(_("-k <num> Set the darkness level"));
puts(_("-S <num> Set the saturation level"));
puts(_("-n <num> Set threshold for wavelet denoising"));
puts(_("-H [0-9] Highlight mode (0=clip, 1=unclip, 2=blend, 3+=rebuild)"));
puts(_("-t [0-7] Flip image (0=none, 3=180, 5=90CCW, 6=90CW)"));
puts(_("-o [0-6] Output colorspace (raw,sRGB,Adobe,Wide,ProPhoto,XYZ,ACES)"));
#ifndef NO_LCMS
puts(_("-o <file> Apply output ICC profile from file"));
puts(_("-p <file> Apply camera ICC profile from file or \"embed\""));
#endif
puts(_("-d Document mode (no color, no interpolation)"));
puts(_("-D Document mode without scaling (totally raw)"));
puts(_("-j Don't stretch or rotate raw pixels"));
puts(_("-W Don't automatically brighten the image"));
puts(_("-b <num> Adjust brightness (default = 1.0)"));
puts(_("-g <p ts> Set custom gamma curve (default = 2.222 4.5)"));
puts(_("-q [0-3] Set the interpolation quality"));
puts(_("-h Half-size color image (twice as fast as \"-q 0\")"));
puts(_("-f Interpolate RGGB as four colors"));
puts(_("-m <num> Apply a 3x3 median filter to R-G and B-G"));
puts(_("-s [0..N-1] Select one raw image or \"all\" from each file"));
puts(_("-6 Write 16-bit instead of 8-bit"));
puts(_("-4 Linear 16-bit, same as \"-6 -W -g 1 1\""));
puts(_("-T Write TIFF instead of PPM"));
puts("");
return 1;
}
argv[argc] = "";
for (arg=1; (((opm = argv[arg][0]) - 2) | 2) == '+'; ) {
opt = argv[arg++][1];
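/* Each option letter in "nbrkStqmHACg" expects the number of numeric
arguments given by the matching digit in "114111111422" (e.g. -r expects 4
values, -C and -g expect 2); reject the option if any of them is not numeric. */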
if ((cp = (char *) strchr (sp="nbrkStqmHACg", opt)))
for (i=0; i < "114111111422"[cp-sp]-'0'; i++)
if (!isdigit(argv[arg+i][0])) {
fprintf (stderr,_("Non-numeric argument to \"-%c\"\n"), opt);
return 1;
}
switch (opt) {
case 'n': threshold = atof(argv[arg++]); break;
case 'b': bright = atof(argv[arg++]); break;
case 'r':
FORC4 user_mul[c] = atof(argv[arg++]); break;
case 'C': aber[0] = 1 / atof(argv[arg++]);
aber[2] = 1 / atof(argv[arg++]); break;
case 'g': gamm[0] = atof(argv[arg++]);
gamm[1] = atof(argv[arg++]);
if (gamm[0]) gamm[0] = 1/gamm[0]; break;
case 'k': user_black = atoi(argv[arg++]); break;
case 'S': user_sat = atoi(argv[arg++]); break;
case 't': user_flip = atoi(argv[arg++]); break;
case 'q': user_qual = atoi(argv[arg++]); break;
case 'm': med_passes = atoi(argv[arg++]); break;
case 'H': highlight = atoi(argv[arg++]); break;
case 's':
shot_select = abs(atoi(argv[arg]));
multi_out = !strcmp(argv[arg++],"all");
break;
case 'o':
if (isdigit(argv[arg][0]) && !argv[arg][1])
output_color = atoi(argv[arg++]);
#ifndef NO_LCMS
else out_profile = argv[arg++];
break;
case 'p': cam_profile = argv[arg++];
#endif
break;
#ifdef LIGHTZONE
case 'F': ofbase = argv[arg++]; break;
#endif
case 'P': bpfile = argv[arg++]; break;
case 'K': dark_frame = argv[arg++]; break;
case 'z': timestamp_only = 1; break;
case 'e': thumbnail_only = 1; break;
case 'i': identify_only = 1; break;
case 'c': write_to_stdout = 1; break;
case 'v': verbose = 1; break;
case 'h': half_size = 1; break;
case 'f': four_color_rgb = 1; break;
case 'A': FORC4 greybox[c] = atoi(argv[arg++]);
case 'a': use_auto_wb = 1; break;
case 'w': use_camera_wb = 1; break;
case 'M': use_camera_matrix = 3 * (opm == '+'); break;
case 'I': read_from_stdin = 1; break;
case 'E': document_mode++;
case 'D': document_mode++;
case 'd': document_mode++;
case 'j': use_fuji_rotate = 0; break;
case 'W': no_auto_bright = 1; break;
case 'T': output_tiff = 1; break;
case '4': gamm[0] = gamm[1] =
no_auto_bright = 1;
case '6': output_bps = 16; break;
default:
fprintf (stderr,_("Unknown option \"-%c\".\n"), opt);
return 1;
}
}
#ifdef LIGHTZONE
if (!half_size && !thumbnail_only) {
float rimm[3][3] = {
{ 0.7977, 0.2880, 0.0000 },
{ 0.1352, 0.7119, 0.0000 },
{ 0.0313, 0.0001, 0.8249 } };
int i, j;
for (i=0; i < 3; ++i)
for (j=0; j < 3; ++j)
xyz_rgb[i][j] = rimm[j][i];
}
#endif
if (arg == argc) {
fprintf (stderr,_("No files to process.\n"));
return 1;
}
if (write_to_stdout) {
if (isatty(1)) {
fprintf (stderr,_("Will not write an image to the terminal!\n"));
return 1;
}
#if defined(WIN32) || defined(DJGPP) || defined(__CYGWIN__)
if (setmode(1,O_BINARY) < 0) {
perror ("setmode()");
return 1;
}
#endif
}
for ( ; arg < argc; arg++) {
status = 1;
raw_image = 0;
image = 0;
oprof = 0;
meta_data = ofname = 0;
ofp = stdout;
if (setjmp (failure)) {
if (fileno(ifp) > 2) fclose(ifp);
if (fileno(ofp) > 2) fclose(ofp);
status = 1;
goto cleanup;
}
ifname = argv[arg];
if (!(ifp = fopen (ifname, "rb"))) {
perror (ifname);
continue;
}
status = (identify(),!is_raw);
if (user_flip >= 0)
flip = user_flip;
switch ((flip+3600) % 360) {
case 270: flip = 5; break;
case 180: flip = 3; break;
case 90: flip = 6;
}
if (timestamp_only) {
if ((status = !timestamp))
fprintf (stderr,_("%s has no timestamp.\n"), ifname);
else if (identify_only)
printf ("%10ld%10d %s\n", (long) timestamp, shot_order, ifname);
else {
if (verbose)
fprintf (stderr,_("%s time set to %d.\n"), ifname, (int) timestamp);
ut.actime = ut.modtime = timestamp;
utime (ifname, &ut);
}
goto next;
}
write_fun = &CLASS write_ppm_tiff;
if (thumbnail_only) {
if ((status = !thumb_offset)) {
fprintf (stderr,_("%s has no thumbnail.\n"), ifname);
goto next;
} else if (thumb_load_raw) {
load_raw = thumb_load_raw;
data_offset = thumb_offset;
height = thumb_height;
width = thumb_width;
filters = 0;
colors = 3;
} else {
fseek (ifp, thumb_offset, SEEK_SET);
write_fun = write_thumb;
goto thumbnail;
}
}
if (load_raw == &CLASS kodak_ycbcr_load_raw) {
height += height & 1;
width += width & 1;
}
if (identify_only && verbose && make[0]) {
printf (_("\nFilename: %s\n"), ifname);
printf (_("Timestamp: %s"), ctime(×tamp));
printf (_("Camera: %s %s\n"), make, model);
if (artist[0])
printf (_("Owner: %s\n"), artist);
if (dng_version) {
printf (_("DNG Version: "));
for (i=24; i >= 0; i -= 8)
printf ("%d%c", dng_version >> i & 255, i ? '.':'\n');
}
printf (_("ISO speed: %d\n"), (int) iso_speed);
printf (_("Shutter: "));
if (shutter > 0 && shutter < 1)
shutter = (printf ("1/"), 1 / shutter);
printf (_("%0.1f sec\n"), shutter);
printf (_("Aperture: f/%0.1f\n"), aperture);
printf (_("Focal length: %0.1f mm\n"), focal_len);
printf (_("Embedded ICC profile: %s\n"), profile_length ? _("yes"):_("no"));
printf (_("Number of raw images: %d\n"), is_raw);
if (pixel_aspect != 1)
printf (_("Pixel Aspect Ratio: %0.6f\n"), pixel_aspect);
if (thumb_offset)
printf (_("Thumb size: %4d x %d\n"), thumb_width, thumb_height);
printf (_("Full size: %4d x %d\n"), raw_width, raw_height);
} else if (!is_raw)
fprintf (stderr,_("Cannot decode file %s\n"), ifname);
if (!is_raw) goto next;
shrink = filters && (half_size || (!identify_only &&
(threshold || aber[0] != 1 || aber[2] != 1)));
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (identify_only) {
if (verbose) {
if (document_mode == 3) {
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (use_fuji_rotate) {
if (fuji_width) {
fuji_width = (fuji_width - 1 + shrink) >> shrink;
iwidth = fuji_width / sqrt(0.5);
iheight = (iheight - fuji_width) / sqrt(0.5);
} else {
if (pixel_aspect < 1) iheight = iheight / pixel_aspect + 0.5;
if (pixel_aspect > 1) iwidth = iwidth * pixel_aspect + 0.5;
}
}
if (flip & 4)
SWAP(iheight,iwidth);
printf (_("Image size: %4d x %d\n"), width, height);
printf (_("Output size: %4d x %d\n"), iwidth, iheight);
printf (_("Raw colors: %d"), colors);
if (filters) {
int fhigh = 2, fwide = 2;
if ((filters ^ (filters >> 8)) & 0xff) fhigh = 4;
if ((filters ^ (filters >> 16)) & 0xffff) fhigh = 8;
if (filters == 1) fhigh = fwide = 16;
if (filters == 9) fhigh = fwide = 6;
printf (_("\nFilter pattern: "));
for (i=0; i < fhigh; i++)
for (c = i && putchar('/') && 0; c < fwide; c++)
putchar (cdesc[fcol(i,c)]);
}
#ifdef LIGHTZONE
printf(_("\nCamera RGB Profile:"));
for (i=0; i<3; ++i) FORCC printf(" %f", rgb_cam[i][c]);
#endif
printf (_("\nDaylight multipliers:"));
FORCC printf (" %f", pre_mul[c]);
if (cam_mul[0] > 0) {
printf (_("\nCamera multipliers:"));
FORC4 printf (" %f", cam_mul[c]);
}
putchar ('\n');
} else
printf (_("%s is a %s %s image.\n"), ifname, make, model);
next:
fclose(ifp);
continue;
}
if (meta_length) {
meta_data = (char *) malloc (meta_length);
merror (meta_data, "main()");
}
if (filters || colors == 1) {
raw_image = (ushort *) calloc ((raw_height+7), raw_width*2);
merror (raw_image, "main()");
} else {
image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (image, "main()");
}
if (verbose)
fprintf (stderr,_("Loading %s %s image from %s ...\n"),
make, model, ifname);
if (shot_select >= is_raw)
fprintf (stderr,_("%s: \"-s %d\" requests a nonexistent image!\n"),
ifname, shot_select);
fseeko (ifp, data_offset, SEEK_SET);
if (raw_image && read_from_stdin)
fread (raw_image, 2, raw_height*raw_width, stdin);
else (*load_raw)();
if (document_mode == 3) {
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (raw_image) {
image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (image, "main()");
crop_masked_pixels();
free (raw_image);
}
if (zero_is_bad) remove_zeroes();
bad_pixels (bpfile);
if (dark_frame) subtract (dark_frame);
quality = 2 + !fuji_width;
if (user_qual >= 0) quality = user_qual;
i = cblack[3];
FORC3 if (i > cblack[c]) i = cblack[c];
FORC4 cblack[c] -= i;
black += i;
i = cblack[6];
FORC (cblack[4] * cblack[5])
if (i > cblack[6+c]) i = cblack[6+c];
FORC (cblack[4] * cblack[5])
cblack[6+c] -= i;
black += i;
if (user_black >= 0) black = user_black;
FORC4 cblack[c] += black;
if (user_sat > 0) maximum = user_sat;
#ifdef COLORCHECK
colorcheck();
#endif
/*
if (is_foveon) {
if (document_mode || load_raw == &CLASS foveon_dp_load_raw) {
for (i=0; i < height*width*4; i++)
if ((short) image[0][i] < 0) image[0][i] = 0;
} else foveon_interpolate();
} else */
if (document_mode < 2)
scale_colors();
pre_interpolate();
if (filters && !document_mode) {
if (quality == 0)
lin_interpolate();
else if (quality == 1 || colors > 3)
vng_interpolate();
else if (quality == 2 && filters > 1000)
ppg_interpolate();
else if (filters == 9)
xtrans_interpolate (quality*2-3);
else
ahd_interpolate();
}
if (mix_green)
for (colors=3, i=0; i < height*width; i++)
image[i][1] = (image[i][1] + image[i][3]) >> 1;
if (!is_foveon && colors == 3) median_filter();
if (!is_foveon && highlight == 2) blend_highlights();
if (!is_foveon && highlight > 2) recover_highlights();
if (use_fuji_rotate) fuji_rotate();
#ifndef NO_LCMS
if (cam_profile) apply_profile (cam_profile, out_profile);
#endif
convert_to_rgb();
if (use_fuji_rotate) stretch();
thumbnail:
if (write_fun == &CLASS jpeg_thumb)
write_ext = ".jpg";
else if (output_tiff && write_fun == &CLASS write_ppm_tiff)
write_ext = ".tiff";
else
write_ext = ".pgm\0.ppm\0.ppm\0.pam" + colors*5-5;
ofname = (char *) malloc (strlen(ifname) + 64);
merror (ofname, "main()");
if (write_to_stdout)
strcpy (ofname,_("standard output"));
else {
#ifdef LIGHTZONE
if (ofbase)
strcpy (ofname, ofbase);
else
#endif
strcpy (ofname, ifname);
if ((cp = strrchr (ofname, '.'))) *cp = 0;
if (multi_out)
sprintf (ofname+strlen(ofname), "_%0*d",
snprintf(0,0,"%d",is_raw-1), shot_select);
if (thumbnail_only)
strcat (ofname, ".thumb");
strcat (ofname, write_ext);
ofp = fopen (ofname, "wb");
if (!ofp) {
status = 1;
perror (ofname);
goto cleanup;
}
}
if (verbose)
fprintf (stderr,_("Writing data to %s ...\n"), ofname);
(*write_fun)();
fclose(ifp);
if (ofp != stdout) fclose(ofp);
cleanup:
if (meta_data) free (meta_data);
if (ofname) free (ofname);
if (oprof) free (oprof);
if (image) free (image);
if (multi_out) {
if (++shot_select < is_raw) arg--;
else shot_select = 0;
}
}
return status;
}
/* vim:set noet sw=8 ts=8: */
|
zscatter.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Scatter the computed blocks into LU destination.
*
* <pre>
* -- Distributed SuperLU routine (version 6.1.1) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
*
* Modified:
* September 18, 2017, enable SIMD vectorized scatter operation.
*
*/
#include <math.h>
#include "superlu_zdefs.h"
static void
zscatter_l_1 (int ib,
int ljb,
int nsupc,
int_t iukp,
int_t* xsup,
int klst,
int nbrow,
int_t lptr,
int temp_nbrow,
int * usub,
int * lsub,
doublecomplex *tempv,
int * indirect_thread,
int_t ** Lrowind_bc_ptr, doublecomplex **Lnzval_bc_ptr,
gridinfo_t * grid)
{
// TAU_STATIC_TIMER_START("SCATTER_LB");
// printf("hello\n");
int_t rel, i, segsize, jj;
doublecomplex *nzval;
int_t *index = Lrowind_bc_ptr[ljb];
int_t ldv = index[1]; /* LDA of the dest lusup. */
int_t lptrj = BC_HEADER;
int_t luptrj = 0;
int_t ijb = index[lptrj];
while (ijb != ib)
{
/* Search for dest block --
blocks are not ordered! */
luptrj += index[lptrj + 1];
lptrj += LB_DESCRIPTOR + index[lptrj + 1];
ijb = index[lptrj];
}
/*
* Build indirect table. This is needed because the
* indices are not sorted for the L blocks.
*/
int_t fnz = FstBlockC (ib);
lptrj += LB_DESCRIPTOR;
for (i = 0; i < index[lptrj - 1]; ++i)
{
rel = index[lptrj + i] - fnz;
indirect_thread[rel] = i;
}
nzval = Lnzval_bc_ptr[ljb] + luptrj;
// tempv =bigV + (cum_nrow + cum_ncol*nbrow);
for (jj = 0; jj < nsupc; ++jj)
{
segsize = klst - usub[iukp + jj];
// printf("segsize %d \n",segsize);
if (segsize) {
/*#pragma _CRI cache_bypass nzval,tempv */
for (i = 0; i < temp_nbrow; ++i) {
rel = lsub[lptr + i] - fnz;
z_sub(&nzval[indirect_thread[rel]], &nzval[indirect_thread[rel]],
&tempv[i]);
// printf("i (src) %d, perm (dest) %d \n",i,indirect_thread[rel]);
#ifdef PI_DEBUG
double zz = 0.0;
// if(!(*(long*)&zz == *(long*)&tempv[i]) )
printf ("(%d %d, %0.3e, %0.3e, %3e ) ", ljb,
nzval - Lnzval_bc_ptr[ljb] + indirect_thread[rel],
nzval[indirect_thread[rel]] + tempv[i],
nzval[indirect_thread[rel]],tempv[i]);
//printing triplets (location??, old value, new value ) if none of them is zero
#endif
}
// printf("\n");
tempv += nbrow;
#ifdef PI_DEBUG
// printf("\n");
#endif
}
nzval += ldv;
// printf("%d\n",nzval );
}
// TAU_STATIC_TIMER_STOP("SCATTER_LB");
} /* zscatter_l_1 */
static void
zscatter_l (
int ib, /* row block number of source block L(i,k) */
int ljb, /* local column block number of dest. block L(i,j) */
int nsupc, /* number of columns in destination supernode */
int_t iukp, /* point to destination supernode's index[] */
int_t* xsup,
int klst,
int nbrow, /* LDA of the block in tempv[] */
int_t lptr, /* Input, point to index[] location of block L(i,k) */
int temp_nbrow, /* number of rows of source block L(i,k) */
int_t* usub,
int_t* lsub,
doublecomplex *tempv,
int* indirect_thread,int* indirect2,
int_t ** Lrowind_bc_ptr, doublecomplex **Lnzval_bc_ptr,
gridinfo_t * grid)
{
int_t rel, i, segsize, jj;
doublecomplex *nzval;
int_t *index = Lrowind_bc_ptr[ljb];
int_t ldv = index[1]; /* LDA of the destination lusup. */
int_t lptrj = BC_HEADER;
int_t luptrj = 0;
int_t ijb = index[lptrj];
while (ijb != ib) /* Search for destination block L(i,j) */
{
luptrj += index[lptrj + 1];
lptrj += LB_DESCRIPTOR + index[lptrj + 1];
ijb = index[lptrj];
}
/*
* Build indirect table. This is needed because the indices are not sorted
* in the L blocks.
*/
int_t fnz = FstBlockC (ib);
int_t dest_nbrow;
lptrj += LB_DESCRIPTOR;
dest_nbrow=index[lptrj - 1];
#if (_OPENMP>=201307)
#pragma omp simd
#endif
for (i = 0; i < dest_nbrow; ++i) {
rel = index[lptrj + i] - fnz;
indirect_thread[rel] = i;
}
#if (_OPENMP>=201307)
#pragma omp simd
#endif
/* can be precalculated? */
for (i = 0; i < temp_nbrow; ++i) { /* Source index is a subset of dest. */
rel = lsub[lptr + i] - fnz;
indirect2[i] =indirect_thread[rel];
}
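/* indirect2[i] now holds, for row i of the source block, its row position
inside the destination block L(i,j), so the scatter loop below can subtract
each column of tempv[] directly into nzval[]. */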
nzval = Lnzval_bc_ptr[ljb] + luptrj; /* Destination block L(i,j) */
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
for (jj = 0; jj < nsupc; ++jj) {
segsize = klst - usub[iukp + jj];
if (segsize) {
#if (_OPENMP>=201307)
#pragma omp simd
#endif
for (i = 0; i < temp_nbrow; ++i) {
z_sub(&nzval[indirect2[i]], &nzval[indirect2[i]], &tempv[i]);
}
tempv += nbrow;
}
nzval += ldv;
}
} /* zscatter_l */
static void
zscatter_u (int ib,
int jb,
int nsupc,
int_t iukp,
int_t * xsup,
int klst,
int nbrow, /* LDA of the block in tempv[] */
int_t lptr, /* point to index location of block L(i,k) */
int temp_nbrow, /* number of rows of source block L(i,k) */
int_t* lsub,
int_t* usub,
doublecomplex* tempv,
int_t ** Ufstnz_br_ptr, doublecomplex **Unzval_br_ptr,
gridinfo_t * grid)
{
#ifdef PI_DEBUG
printf ("A(%d,%d) goes to U block \n", ib, jb);
#endif
// TAU_STATIC_TIMER_START("SCATTER_U");
// TAU_STATIC_TIMER_START("SCATTER_UB");
int_t jj, i, fnz, rel;
int segsize;
doublecomplex *ucol;
int_t ilst = FstBlockC (ib + 1);
int_t lib = LBi (ib, grid);
int_t *index = Ufstnz_br_ptr[lib];
/* Reinitialize the pointers to the beginning of the k-th column/row of
* L/U factors.
* usub[] - index array for panel U(k,:)
*/
int_t iuip_lib, ruip_lib;
iuip_lib = BR_HEADER;
ruip_lib = 0;
int_t ijb = index[iuip_lib];
while (ijb < jb) { /* Search for destination block. */
ruip_lib += index[iuip_lib + 1];
// printf("supersize[%ld] \t:%ld \n",ijb,SuperSize( ijb ) );
iuip_lib += UB_DESCRIPTOR + SuperSize (ijb);
ijb = index[iuip_lib];
}
/* Skip descriptor. Now point to fstnz index of block U(i,j). */
iuip_lib += UB_DESCRIPTOR;
// tempv = bigV + (cum_nrow + cum_ncol*nbrow);
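/* For each column jj of the destination supernode, fnz is the first stored
row of that column in U(i,j) and rows fnz..ilst-1 are stored contiguously, so
ruip_lib advances by (ilst - fnz) per column whether or not this update
touches that column. */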
for (jj = 0; jj < nsupc; ++jj) {
segsize = klst - usub[iukp + jj];
fnz = index[iuip_lib++];
if (segsize) { /* Nonzero segment in U(k,j). */
ucol = &Unzval_br_ptr[lib][ruip_lib];
// printf("========Entering loop=========\n");
#if (_OPENMP>=201307)
#pragma omp simd
#endif
for (i = 0; i < temp_nbrow; ++i) {
rel = lsub[lptr + i] - fnz;
// printf("%d %d %d %d %d \n",lptr,i,fnz,temp_nbrow,nbrow );
// printf("hello ucol[%d] %d %d : \n",rel,lsub[lptr + i],fnz);
z_sub(&ucol[rel], &ucol[rel], &tempv[i]);
#ifdef PI_DEBUG
double zz = 0.0;
if (!(*(long *) &zz == *(long *) &tempv[i]))
printf ("(%d, %0.3e, %0.3e ) ", rel, ucol[rel] + tempv[i],
ucol[rel]);
//printing triplets (location??, old value, new value ) if none of them is zero
#endif
} /* for i = 0:temp_nbropw */
tempv += nbrow; /* Jump LDA to next column */
#ifdef PI_DEBUG
// printf("\n");
#endif
} /* if segsize */
ruip_lib += ilst - fnz;
} /* for jj = 0:nsupc */
#ifdef PI_DEBUG
// printf("\n");
#endif
// TAU_STATIC_TIMER_STOP("SCATTER_UB");
} /* zscatter_u */
/*Divide CPU-GPU dgemm work here*/
#ifdef PI_DEBUG
int Ngem = 2;
// int_t Ngem = 0;
int min_gpu_col = 6;
#else
// int_t Ngem = 0;
#endif
#ifdef GPU_ACC
void
gemm_division_cpu_gpu(
int* num_streams_used, /*number of streams that will be used */
int* stream_end_col, /*array holding last column blk for each partition */
int * ncpu_blks, /*Number of CPU dgemm blks */
/*input */
int nbrow, /*number of row in A matrix */
int ldu, /*number of k in dgemm */
int nstreams,
int* full_u_cols, /*array containing prefix sum of work load */
int num_blks /*Number of work load */
)
{
int Ngem = sp_ienv_dist(7); /*get_mnk_dgemm ();*/
int min_gpu_col = get_cublas_nb ();
// Ngem = 1000000000;
/*
The CPU-to-GPU dgemm split should ideally approach 0:1 to hide the total
cost. However, launching on the GPU costs roughly 20,000 ns of latency, in
which time about 200,000 floating-point operations can be done, so roughly
200,000/(2*nbrow*ldu) columns' worth of work should stay on the CPU to hide
that latency; hence Ngem = 200,000/2.
*/
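/* Illustrative arithmetic (hypothetical sizes): with nbrow = 100, ldu = 50
and Ngem = 100000 as above, the CPU threshold is Ngem/(nbrow*ldu) = 20
columns, so the leading blocks covering about 20 columns stay on the CPU;
the remaining columns are split across the streams, but never fewer than
min_gpu_col or FP_MIN columns per stream. */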
int i, j;
// {
// *num_streams_used=0;
// *ncpu_blks = num_blks;
// return;
// }
for (int i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
*ncpu_blks = 0;
/* Easy return -1 when the number of columns is less than the threshold */
if (full_u_cols[num_blks - 1] < (Ngem / (nbrow * ldu)) || num_blks == 1 )
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
#ifdef PI_DEBUG
printf ("full_u_cols[num_blks-1] %d %d \n",
full_u_cols[num_blks - 1], (Ngem / (nbrow * ldu)));
printf ("Early return \n");
#endif
return;
}
/* Easy return -2 when the number of streams is 0 */
if (nstreams == 0)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
/* code */
}
/*find first block where count > Ngem */
for (i = 0; i < num_blks - 1; ++i) /*I can use binary search here */
{
if (full_u_cols[i + 1] > Ngem / (nbrow * ldu))
break;
}
*ncpu_blks = i + 1;
int_t cols_remain =
full_u_cols[num_blks - 1] - full_u_cols[*ncpu_blks - 1];
#ifdef PI_DEBUG
printf ("Remaining cols %d num_blks %d cpu_blks %d \n", cols_remain,
num_blks, *ncpu_blks);
#endif
if (cols_remain > 0)
{
*num_streams_used = 1; /* now at least one stream will be used */
#ifdef PI_DEBUG
printf ("%d %d %d %d \n", full_u_cols[num_blks - 1],
full_u_cols[*ncpu_blks], *ncpu_blks, nstreams);
#endif
int_t FP_MIN = 200000 / (nbrow * ldu);
int_t cols_per_stream = SUPERLU_MAX (min_gpu_col, cols_remain / nstreams);
cols_per_stream = SUPERLU_MAX (cols_per_stream, FP_MIN);
#ifdef PI_DEBUG
printf ("cols_per_stream :\t%d\n", cols_per_stream);
#endif
int_t cutoff = cols_per_stream + full_u_cols[*ncpu_blks - 1];
for (int_t i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
j = *ncpu_blks;
for (i = 0; i < nstreams - 1; ++i)
{
int_t st = (i == 0) ? (*ncpu_blks) : stream_end_col[i - 1];
for (j = st; j < num_blks - 1; ++j)
{
#ifdef PI_DEBUG
printf ("i %d, j %d, %d %d ", i, j, full_u_cols[j + 1],
cutoff);
#endif
if (full_u_cols[j + 1] > cutoff)
{
#ifdef PI_DEBUG
printf ("cutoff met \n");
#endif
cutoff = cols_per_stream + full_u_cols[j];
stream_end_col[i] = j + 1;
*num_streams_used += 1;
j++;
break;
}
#ifdef PI_DEBUG
printf ("\n");
#endif
}
}
}
}
void
gemm_division_new (int * num_streams_used, /*number of streams that will be used */
int * stream_end_col, /*array holding last column blk for each partition */
int * ncpu_blks, /*Number of CPU dgemm blks */
/*input */
int nbrow, /*number of row in A matrix */
int ldu, /*number of k in dgemm */
int nstreams,
Ublock_info_t *Ublock_info, /*array containing prefix sum of work load */
int num_blks /*Number of work load */
)
{
int Ngem = sp_ienv_dist(7); /*get_mnk_dgemm ();*/
int min_gpu_col = get_cublas_nb ();
// Ngem = 1000000000;
/*
The CPU-to-GPU dgemm split should ideally approach 0:1 to hide the total
cost. However, launching on the GPU costs roughly 20,000 ns of latency, in
which time about 200,000 floating-point operations can be done, so roughly
200,000/(2*nbrow*ldu) columns' worth of work should stay on the CPU to hide
that latency; hence Ngem = 200,000/2.
*/
int_t i, j;
for (int i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
*ncpu_blks = 0;
/* Easy return -1 when the number of columns is less than the threshold */
if (Ublock_info[num_blks - 1].full_u_cols < (Ngem / (nbrow * ldu)) || num_blks == 1)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
}
/* Easy return -2 when the number of streams is 0 */
if (nstreams == 0)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
/* code */
}
/*find first block where count > Ngem */
for (i = 0; i < num_blks - 1; ++i) /*I can use binary search here */
{
if (Ublock_info[i + 1].full_u_cols > Ngem / (nbrow * ldu))
break;
}
*ncpu_blks = i + 1;
int_t cols_remain =
Ublock_info [num_blks - 1].full_u_cols - Ublock_info[*ncpu_blks - 1].full_u_cols;
if (cols_remain > 0)
{
*num_streams_used = 1; /* now at least one stream will be used */
int_t FP_MIN = 200000 / (nbrow * ldu);
int_t cols_per_stream = SUPERLU_MAX (min_gpu_col, cols_remain / nstreams);
cols_per_stream = SUPERLU_MAX (cols_per_stream, FP_MIN);
int_t cutoff = cols_per_stream + Ublock_info[*ncpu_blks - 1].full_u_cols;
for (int_t i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
j = *ncpu_blks;
for (i = 0; i < nstreams - 1; ++i)
{
int_t st = (i == 0) ? (*ncpu_blks) : stream_end_col[i - 1];
for (j = st; j < num_blks - 1; ++j)
{
if (Ublock_info[j + 1].full_u_cols > cutoff)
{
cutoff = cols_per_stream + Ublock_info[j].full_u_cols;
stream_end_col[i] = j + 1;
*num_streams_used += 1;
j++;
break;
}
}
}
}
}
#endif /* defined GPU_ACC */
|
GB_binop__bxor_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__bxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__bxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint8)
// A*D function (colscale): GB (_AxD__bxor_uint8)
// D*A function (rowscale): GB (_DxB__bxor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint8)
// C=scalar+B GB (_bind1st__bxor_uint8)
// C=scalar+B' GB (_bind1st_tran__bxor_uint8)
// C=A+scalar GB (_bind2nd__bxor_uint8)
// C=A'+scalar GB (_bind2nd_tran__bxor_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_UINT8 || GxB_NO_BXOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxor_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxor_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxor_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) ^ (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxor_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) ^ (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) ^ (aij) ; \
}
GrB_Info GB (_bind1st_tran__bxor_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) ^ (y) ; \
}
GrB_Info GB (_bind2nd_tran__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
make_graph.c | /* Copyright (C) 2009-2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <math.h>
#ifdef __MTA__
#include <sys/mta_task.h>
#endif
#ifdef GRAPH_GENERATOR_MPI
#include <mpi.h>
#endif
#ifdef GRAPH_GENERATOR_OMP
#include <omp.h>
#endif
/* Simplified interface to build graphs with scrambled vertices. */
#include "graph_generator.h"
#include "permutation_gen.h"
#include "apply_permutation_mpi.h"
#include "scramble_edges.h"
#include "utils.h"
#ifdef GRAPH_GENERATOR_SEQ
void make_graph(int log_numverts, int64_t desired_nedges, uint64_t userseed1, uint64_t userseed2, const double initiator[4], int64_t* nedges_ptr, int64_t** result_ptr) {
int64_t N, M;
N = (int64_t)pow(GRAPHGEN_INITIATOR_SIZE, log_numverts);
M = desired_nedges;
/* Spread the two 64-bit numbers into five nonzero values in the correct
* range. */
uint_fast32_t seed[5];
make_mrg_seed(userseed1, userseed2, seed);
int64_t nedges = compute_edge_array_size(0, 1, M);
*nedges_ptr = nedges;
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
generated_edge* edges = (generated_edge*)xcalloc(nedges, sizeof(generated_edge)); /* multiplicity set to 0 for unused edges */
#else
int64_t* edges = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t));
#endif
generate_kronecker(0, 1, seed, log_numverts, M, initiator, edges);
int64_t* vertex_perm = (int64_t*)xmalloc(N * sizeof(int64_t));
int64_t* result;
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
result = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t));
#else
result = edges;
#endif
*result_ptr = result;
mrg_state state;
mrg_seed(&state, seed);
rand_sort_shared(&state, N, vertex_perm);
int64_t i;
/* Apply vertex permutation to graph, optionally copying into user's result
* array. */
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
for (i = 0; i < nedges; ++i) {
if (edges[i].multiplicity != 0) {
int64_t v1 = vertex_perm[edges[i].src];
int64_t v2 = vertex_perm[edges[i].tgt];
/* Sort these since otherwise the directions of the permuted edges would
* give away the unscrambled vertex order. */
result[i * 2] = (v1 < v2) ? v1 : v2;
result[i * 2 + 1] = (v1 < v2) ? v2 : v1;
} else {
result[i * 2] = result[i * 2 + 1] = (int64_t)(-1);
}
}
free(edges);
#else
for (i = 0; i < 2 * nedges; i += 2) {
if (edges[i] != (int64_t)(-1)) {
int64_t v1 = vertex_perm[edges[i]];
int64_t v2 = vertex_perm[edges[i + 1]];
/* Sort these since otherwise the directions of the permuted edges would
* give away the unscrambled vertex order. */
edges[i] = (v1 < v2) ? v1 : v2;
edges[i + 1] = (v1 < v2) ? v2 : v1;
}
}
#endif
free(vertex_perm);
/* Randomly mix up the order of the edges. */
scramble_edges_shared(userseed1, userseed2, nedges, edges);
}
#endif /* GRAPH_GENERATOR_SEQ */
#ifdef __MTA__
void make_graph(int log_numverts, int64_t desired_nedges, uint64_t userseed1, uint64_t userseed2, const double initiator[4], int64_t* nedges_ptr, int64_t** result_ptr) {
int64_t N, M;
N = (int64_t)pow(GRAPHGEN_INITIATOR_SIZE, log_numverts);
M = (int64_t)desired_nedges;
/* Spread the two 64-bit numbers into five nonzero values in the correct
* range. */
uint_fast32_t seed[5];
make_mrg_seed(userseed1, userseed2, seed);
int64_t nedges = compute_edge_array_size(0, 1, M);
*nedges_ptr = nedges;
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
generated_edge* edges = (generated_edge*)xcalloc(nedges, sizeof(generated_edge)); /* multiplicity set to 0 for unused edges */
#else
int64_t* edges = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t));
#endif
int rank, size;
/* The "for all streams" here is in compiler versions >= 6.4 */
#pragma mta use 100 streams
#pragma mta for all streams rank of size
{
double my_initiator[GRAPHGEN_INITIATOR_SIZE * GRAPHGEN_INITIATOR_SIZE]; /* Local copy */
int i;
for (i = 0; i < GRAPHGEN_INITIATOR_SIZE * GRAPHGEN_INITIATOR_SIZE; ++i) {
my_initiator[i] = initiator[i];
}
generate_kronecker(rank, size, seed, log_numverts, M, my_initiator, edges);
}
int64_t* vertex_perm = (int64_t*)xmalloc(N * sizeof(int64_t));
int64_t* result;
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
result = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t));
#else
result = edges;
#endif
*result_ptr = result;
mrg_state state;
mrg_seed(&state, seed);
rand_sort_shared(&state, N, vertex_perm);
int64_t i;
/* Apply vertex permutation to graph, optionally copying into user's result
* array. */
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
#pragma mta assert parallel
#pragma mta block schedule
for (i = 0; i < nedges; ++i) {
if (edges[i].multiplicity != 0) {
int64_t v1 = vertex_perm[edges[i].src];
int64_t v2 = vertex_perm[edges[i].tgt];
/* Sort these since otherwise the directions of the permuted edges would
* give away the unscrambled vertex order. */
result[i * 2] = MTA_INT_MIN(v1, v2);
result[i * 2 + 1] = MTA_INT_MAX(v1, v2);
} else {
result[i * 2] = result[i * 2 + 1] = (int64_t)(-1);
}
}
free(edges);
#else
#pragma mta assert parallel
#pragma mta block schedule
for (i = 0; i < 2 * nedges; i += 2) {
if (edges[i] != (int64_t)(-1)) {
int64_t v1 = vertex_perm[edges[i]];
int64_t v2 = vertex_perm[edges[i + 1]];
/* Sort these since otherwise the directions of the permuted edges would
* give away the unscrambled vertex order. */
edges[i] = MTA_INT_MIN(v1, v2);
edges[i + 1] = MTA_INT_MAX(v1, v2);
}
}
#endif
free(vertex_perm);
/* Randomly mix up the order of the edges. */
scramble_edges_shared(userseed1, userseed2, nedges, edges);
}
#endif /* __MTA__ */
#ifdef GRAPH_GENERATOR_OMP
void make_graph(int log_numverts, int64_t desired_nedges, uint64_t userseed1, uint64_t userseed2, const double initiator[4], int64_t* nedges_ptr, int64_t** result_ptr) {
int64_t N, M;
N = (int64_t)pow(GRAPHGEN_INITIATOR_SIZE, log_numverts);
M = desired_nedges;
/* Spread the two 64-bit numbers into five nonzero values in the correct
* range. */
uint_fast32_t seed[5];
make_mrg_seed(userseed1, userseed2, seed);
int64_t nedges = compute_edge_array_size(0, 1, M);
*nedges_ptr = nedges;
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
generated_edge* edges = (generated_edge*)xcalloc(nedges, sizeof(generated_edge)); /* multiplicity set to 0 for unused edges */
#else
int64_t* edges = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t));
#endif
#pragma omp parallel
{
int rank = omp_get_thread_num(), size = omp_get_num_threads();
generate_kronecker(rank, size, seed, log_numverts, M, initiator, edges);
}
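/* Each OpenMP thread generates its own share of the edge list:
generate_kronecker() partitions the work by (rank, size), here the thread
number and thread count, just as the MPI build partitions by process. */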
int64_t* vertex_perm = (int64_t*)xmalloc(N * sizeof(int64_t));
int64_t* result;
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
result = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t));
#else
result = edges;
#endif
*result_ptr = result;
mrg_state state;
mrg_seed(&state, seed);
rand_sort_shared(&state, N, vertex_perm);
int64_t i;
/* Apply vertex permutation to graph, optionally copying into user's result
* array. */
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
#pragma omp parallel for
for (i = 0; i < nedges; ++i) {
if (edges[i].multiplicity != 0) {
int64_t v1 = vertex_perm[edges[i].src];
int64_t v2 = vertex_perm[edges[i].tgt];
/* Sort these since otherwise the directions of the permuted edges would
* give away the unscrambled vertex order. */
result[i * 2] = (v1 < v2) ? v1 : v2;
result[i * 2 + 1] = (v1 < v2) ? v2 : v1;
} else {
result[i * 2] = result[i * 2 + 1] = (int64_t)(-1);
}
}
free(edges);
#else
#pragma omp parallel for
for (i = 0; i < 2 * nedges; i += 2) {
if (edges[i] != (int64_t)(-1)) {
int64_t v1 = vertex_perm[edges[i]];
int64_t v2 = vertex_perm[edges[i + 1]];
/* Sort these since otherwise the directions of the permuted edges would
* give away the unscrambled vertex order. */
edges[i] = (v1 < v2) ? v1 : v2;
edges[i + 1] = (v1 < v2) ? v2 : v1;
}
}
#endif
free(vertex_perm);
/* Randomly mix up the order of the edges. */
scramble_edges_shared(userseed1, userseed2, nedges, edges);
}
#endif /* GRAPH_GENERATOR_OMP */
#ifdef GRAPH_GENERATOR_MPI
void make_graph(int log_numverts, int64_t desired_nedges, uint64_t userseed1, uint64_t userseed2, const double initiator[4], int64_t* nedges_ptr, int64_t** result_ptr) {
int64_t N, M;
int rank, size;
N = (int64_t)pow(GRAPHGEN_INITIATOR_SIZE, log_numverts);
M = desired_nedges;
/* Spread the two 64-bit numbers into five nonzero values in the correct
* range. */
uint_fast32_t seed[5];
make_mrg_seed(userseed1, userseed2, seed);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int64_t nedges = compute_edge_array_size(rank, size, M);
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
generated_edge* local_edges = (generated_edge*)xmalloc(nedges * sizeof(generated_edge));
#else
int64_t* local_edges = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t));
#endif
double start = MPI_Wtime();
generate_kronecker(rank, size, seed, log_numverts, M, initiator, local_edges);
double gen_time = MPI_Wtime() - start;
int64_t* local_vertex_perm = NULL;
mrg_state state;
mrg_seed(&state, seed);
start = MPI_Wtime();
int64_t perm_local_size;
rand_sort_mpi(MPI_COMM_WORLD, &state, N, &perm_local_size, &local_vertex_perm);
double perm_gen_time = MPI_Wtime() - start;
/* Copy the edge endpoints into the result array if necessary. */
int64_t* result;
#ifdef GRAPHGEN_KEEP_MULTIPLICITIES
result = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t));
for (int64_t i = 0; i < nedges; ++i) {
if (local_edges[i].multiplicity != 0) {
result[i * 2] = local_edges[i].src;
result[i * 2 + 1] = local_edges[i].tgt;
} else {
result[i * 2] = result[i * 2 + 1] = (int64_t)(-1);
}
}
free(local_edges); local_edges = NULL;
#else
result = local_edges;
*result_ptr = result;
local_edges = NULL; /* Freed by caller */
#endif
/* Apply vertex permutation to graph. */
start = MPI_Wtime();
apply_permutation_mpi(MPI_COMM_WORLD, perm_local_size, local_vertex_perm, N, nedges, result);
double perm_apply_time = MPI_Wtime() - start;
free(local_vertex_perm); local_vertex_perm = NULL;
/* Randomly mix up the order of the edges. */
start = MPI_Wtime();
int64_t* new_result;
int64_t nedges_out;
scramble_edges_mpi(MPI_COMM_WORLD, userseed1, userseed2, nedges, result, &nedges_out, &new_result);
double edge_scramble_time = MPI_Wtime() - start;
free(result); result = NULL;
*result_ptr = new_result;
*nedges_ptr = nedges_out;
if (rank == 0) {
fprintf(stdout, "unpermuted_graph_generation: %f s\n", gen_time);
fprintf(stdout, "vertex_permutation_generation: %f s\n", perm_gen_time);
fprintf(stdout, "vertex_permutation_application: %f s\n", perm_apply_time);
fprintf(stdout, "edge_scrambling: %f s\n", edge_scramble_time);
}
}
#endif
/* PRNG interface for implementations; takes seed in same format as given by
* users, and creates a vector of doubles in a reproducible (and
* random-access) way. */
void make_random_numbers(
/* in */ int64_t nvalues /* Number of values to generate */,
/* in */ uint64_t userseed1 /* Arbitrary 64-bit seed value */,
/* in */ uint64_t userseed2 /* Arbitrary 64-bit seed value */,
/* in */ int64_t position /* Start index in random number stream */,
/* out */ double* result /* Returned array of values */
) {
int64_t i;
uint_fast32_t seed[5];
make_mrg_seed(userseed1, userseed2, seed);
mrg_state st;
mrg_seed(&st, seed);
mrg_skip(&st, 2, 0, 2 * position); /* Each double takes two PRNG outputs */
for (i = 0; i < nvalues; ++i) {
result[i] = mrg_get_double_orig(&st);
}
}
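/* Illustrative usage sketch (not part of this file): because `position` gives
 * random access into the stream (mrg_skip advances two PRNG outputs per
 * double), a block generated at an offset matches the tail of a block
 * generated from the start, for the same user seeds. `seed1` and `seed2`
 * below are arbitrary placeholder values:
 *
 *   double a[100], b[40];
 *   make_random_numbers(100, seed1, seed2,  0, a);
 *   make_random_numbers( 40, seed1, seed2, 60, b);
 *   // b[i] == a[60 + i] for i = 0..39
 */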
|
GB_unop__identity_fc64_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc64_fc32
// op(A') function: GB_unop_tran__identity_fc64_fc32
// C type: GxB_FC64_t
// A type: GxB_FC32_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_fc64_fc32
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_fc64_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_subassign_08n.c | //------------------------------------------------------------------------------
// GB_subassign_08n: C(I,J)<M> += A ; no S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 08n: C(I,J)<M> += A ; no S
// M: present
// Mask_comp: false
// C_replace: false
// accum: present
// A: matrix
// S: none
// C not bitmap; C can be full since no zombies are inserted in that case.
// If C is bitmap, then GB_bitmap_assign_M_accum is used instead.
// M, A: not bitmap; Method 08s is used instead if M or A are bitmap.
#include "GB_subassign_methods.h"
//------------------------------------------------------------------------------
// GB_PHASE1_ACTION
//------------------------------------------------------------------------------
// action to take for phase 1 when A(i,j) exists and M(i,j)=1
#define GB_PHASE1_ACTION \
{ \
if (cjdense) \
{ \
/* direct lookup of C(iC,jC) */ \
GB_iC_DENSE_LOOKUP ; \
/* ----[C A 1] or [X A 1]------------------------------- */ \
/* [C A 1]: action: ( =C+A ): apply accum */ \
/* [X A 1]: action: ( undelete ): zombie lives */ \
GB_withaccum_C_A_1_matrix ; \
} \
else \
{ \
/* binary search for C(iC,jC) in C(:,jC) */ \
GB_iC_BINARY_SEARCH ; \
if (cij_found) \
{ \
/* ----[C A 1] or [X A 1]--------------------------- */ \
/* [C A 1]: action: ( =C+A ): apply accum */ \
/* [X A 1]: action: ( undelete ): zombie lives */ \
GB_withaccum_C_A_1_matrix ; \
} \
else \
{ \
/* ----[. A 1]-------------------------------------- */ \
/* [. A 1]: action: ( insert ) */ \
task_pending++ ; \
} \
} \
}
//------------------------------------------------------------------------------
// GB_PHASE2_ACTION
//------------------------------------------------------------------------------
// action to take for phase 2 when A(i,j) exists and M(i,j)=1
#define GB_PHASE2_ACTION \
{ \
ASSERT (!cjdense) ; \
{ \
/* binary search for C(iC,jC) in C(:,jC) */ \
GB_iC_BINARY_SEARCH ; \
if (!cij_found) \
{ \
/* ----[. A 1]-------------------------------------- */ \
/* [. A 1]: action: ( insert ) */ \
GB_PENDING_INSERT_aij ; \
} \
} \
}
//------------------------------------------------------------------------------
// GB_subassign_08n: C(I,J)<M> += A ; no S
//------------------------------------------------------------------------------
GrB_Info GB_subassign_08n
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const GrB_BinaryOp accum,
const GrB_Matrix A,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (!GB_IS_BITMAP (M)) ; // Method 08s is used if M is bitmap
ASSERT (!GB_IS_BITMAP (A)) ; // Method 08s is used if A is bitmap
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_MATRIX_WAIT_IF_JUMBLED (C) ;
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_MATRIX_WAIT_IF_JUMBLED (A) ;
GB_GET_C ; // C must not be bitmap
int64_t zorig = C->nzombies ;
const int64_t Cnvec = C->nvec ;
const int64_t *restrict Ch = C->h ;
const int64_t *restrict Cp = C->p ;
const bool C_is_hyper = (Ch != NULL) ;
GB_GET_MASK ;
GB_GET_A ;
const int64_t *restrict Ah = A->h ;
GB_GET_ACCUM ;
//--------------------------------------------------------------------------
// Method 08n: C(I,J)<M> += A ; no S
//--------------------------------------------------------------------------
// Time: Close to optimal. Omega (sum_j (min (nnz (A(:,j)), nnz (M(:,j))))),
// since only the intersection of A.*M needs to be considered. If either
// M(:,j) or A(:,j) are very sparse compared to the other, then the shorter
// is traversed with a linear-time scan and a binary search is used for the
// other. If the number of nonzeros is comparable, a linear-time scan is
// used for both. Once two entries M(i,j)=1 and A(i,j) are found with the
// same index i, the entry A(i,j) is accumulated or inserted into C.
// The algorithm is very much like the eWise multiplication of A.*M, so the
// parallel scheduling relies on GB_emult_08_phase0 and GB_ewise_slice.
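// Illustrative cost sketch (not part of the library): a two-pointer merge of
// A(:,j) and M(:,j) costs about ajnz + mjnz index probes, while scanning the
// sparser list and binary-searching the denser one costs about
// min(ajnz,mjnz) * log2(max(ajnz,mjnz)). For example, with ajnz = 10^6 and
// mjnz = 10 the merge does ~10^6 probes but the search does only about
// 10 * 20 = 200, which is one plausible reading of the 32x density
// heuristic used in the loops below.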
//--------------------------------------------------------------------------
// Parallel: slice the eWiseMult of Z=A.*M (Method 08n only)
//--------------------------------------------------------------------------
// Method 08n only. If C is sparse, it is sliced for a fine task, so that
// it can do a binary search via GB_iC_BINARY_SEARCH. But if C(:,jC) is
// dense, C(:,jC) is not sliced, so the fine task must do a direct lookup
// via GB_iC_DENSE_LOOKUP. Otherwise a race condition will occur.
// The Z matrix is not constructed, except for its hyperlist (Zh_shallow)
// and mapping to A and M.
// No matrix (C, M, or A) can be bitmap. C, M, A can be sparse/hyper/full,
// in any combination.
int64_t Znvec ;
const int64_t *restrict Zh_shallow = NULL ;
GB_OK (GB_subassign_08n_slice (
&TaskList, &TaskList_size, &ntasks, &nthreads,
&Znvec, &Zh_shallow, &Z_to_A, &Z_to_A_size, &Z_to_M, &Z_to_M_size,
C, I, nI, Ikind, Icolon, J, nJ, Jkind, Jcolon,
A, M, Context)) ;
GB_ALLOCATE_NPENDING_WERK ;
//--------------------------------------------------------------------------
// phase 1: undelete zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get A(:,j) and M(:,j)
//------------------------------------------------------------------
int64_t j = GBH (Zh_shallow, k) ;
GB_GET_EVEC (pA, pA_end, pA, pA_end, Ap, Ah, j, k, Z_to_A, Avlen) ;
GB_GET_EVEC (pM, pM_end, pB, pB_end, Mp, Mh, j, k, Z_to_M, Mvlen) ;
//------------------------------------------------------------------
// quick checks for empty intersection of A(:,j) and M(:,j)
//------------------------------------------------------------------
int64_t ajnz = pA_end - pA ;
int64_t mjnz = pM_end - pM ;
if (ajnz == 0 || mjnz == 0) continue ;
int64_t iA_first = GBI (Ai, pA, Avlen) ;
int64_t iA_last = GBI (Ai, pA_end-1, Avlen) ;
int64_t iM_first = GBI (Mi, pM, Mvlen) ;
int64_t iM_last = GBI (Mi, pM_end-1, Mvlen) ;
if (iA_last < iM_first || iM_last < iA_first) continue ;
int64_t pM_start = pM ;
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
GB_GET_jC ;
bool cjdense = (pC_end - pC_start == Cvlen) ;
//------------------------------------------------------------------
// C(I,jC)<M(:,j)> += A(:,j) ; no S
//------------------------------------------------------------------
if (ajnz > 32 * mjnz)
{
//--------------------------------------------------------------
// A(:,j) is much denser than M(:,j)
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
if (GB_mcast (Mx, pM, msize))
{
int64_t iA = GBI (Mi, pM, Mvlen) ;
// find iA in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
// FUTURE::: exploit dense A(:,j)
GB_BINARY_SEARCH (iA, Ai, pA, pright, found) ;
if (found) GB_PHASE1_ACTION ;
}
}
}
else if (mjnz > 32 * ajnz)
{
//--------------------------------------------------------------
// M(:,j) is much denser than A(:,j)
//--------------------------------------------------------------
// FUTURE::: exploit dense mask
bool mjdense = false ;
for ( ; pA < pA_end ; pA++)
{
int64_t iA = GBI (Ai, pA, Avlen) ;
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (mij) GB_PHASE1_ACTION ;
}
}
else
{
//----------------------------------------------------------
// A(:,j) and M(:,j) have about the same # of entries
//----------------------------------------------------------
// linear-time scan of A(:,j) and M(:,j)
while (pA < pA_end && pM < pM_end)
{
int64_t iA = GBI (Ai, pA, Avlen) ;
int64_t iM = GBI (Mi, pM, Mvlen) ;
if (iA < iM)
{
// A(i,j) exists but not M(i,j)
GB_NEXT (A) ;
}
else if (iM < iA)
{
// M(i,j) exists but not A(i,j)
GB_NEXT (M) ;
}
else
{
// both A(i,j) and M(i,j) exist
if (GB_mcast (Mx, pM, msize)) GB_PHASE1_ACTION ;
GB_NEXT (A) ;
GB_NEXT (M) ;
}
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
zorig = C->nzombies ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get A(:,j) and M(:,j)
//------------------------------------------------------------------
int64_t j = GBH (Zh_shallow, k) ;
GB_GET_EVEC (pA, pA_end, pA, pA_end, Ap, Ah, j, k, Z_to_A, Avlen) ;
GB_GET_EVEC (pM, pM_end, pB, pB_end, Mp, Mh, j, k, Z_to_M, Mvlen) ;
//------------------------------------------------------------------
// quick checks for empty intersection of A(:,j) and M(:,j)
//------------------------------------------------------------------
int64_t ajnz = pA_end - pA ;
int64_t mjnz = pM_end - pM ;
if (ajnz == 0 || mjnz == 0) continue ;
int64_t iA_first = GBI (Ai, pA, Avlen) ;
int64_t iA_last = GBI (Ai, pA_end-1, Avlen) ;
int64_t iM_first = GBI (Mi, pM, Mvlen) ;
int64_t iM_last = GBI (Mi, pM_end-1, Mvlen) ;
if (iA_last < iM_first || iM_last < iA_first) continue ;
int64_t pM_start = pM ;
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
GB_GET_jC ;
bool cjdense = (pC_end - pC_start == Cvlen) ;
if (cjdense) continue ;
//------------------------------------------------------------------
// C(I,jC)<M(:,j)> += A(:,j) ; no S
//------------------------------------------------------------------
if (ajnz > 32 * mjnz)
{
//--------------------------------------------------------------
// A(:,j) is much denser than M(:,j)
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
if (GB_mcast (Mx, pM, msize))
{
int64_t iA = GBI (Mi, pM, Mvlen) ;
// find iA in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
// FUTURE::: exploit dense A(:,j)
GB_BINARY_SEARCH (iA, Ai, pA, pright, found) ;
if (found) GB_PHASE2_ACTION ;
}
}
}
else if (mjnz > 32 * ajnz)
{
//--------------------------------------------------------------
// M(:,j) is much denser than A(:,j)
//--------------------------------------------------------------
// FUTURE::: exploit dense mask
bool mjdense = false ;
for ( ; pA < pA_end ; pA++)
{
int64_t iA = GBI (Ai, pA, Avlen) ;
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (mij) GB_PHASE2_ACTION ;
}
}
else
{
//----------------------------------------------------------
// A(:,j) and M(:,j) have about the same # of entries
//----------------------------------------------------------
// linear-time scan of A(:,j) and M(:,j)
while (pA < pA_end && pM < pM_end)
{
int64_t iA = GBI (Ai, pA, Avlen) ;
int64_t iM = GBI (Mi, pM, Mvlen) ;
if (iA < iM)
{
// A(i,j) exists but not M(i,j)
GB_NEXT (A) ;
}
else if (iM < iA)
{
// M(i,j) exists but not A(i,j)
GB_NEXT (M) ;
}
else
{
// both A(i,j) and M(i,j) exist
if (GB_mcast (Mx, pM, msize)) GB_PHASE2_ACTION ;
GB_NEXT (A) ;
GB_NEXT (M) ;
}
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
GB_binop__rdiv_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rdiv_int32
// A.*B function (eWiseMult): GB_AemultB__rdiv_int32
// A*D function (colscale): GB_AxD__rdiv_int32
// D*A function (rowscale): GB_DxB__rdiv_int32
// C+=B function (dense accum): GB_Cdense_accumB__rdiv_int32
// C+=b function (dense accum): GB_Cdense_accumb__rdiv_int32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_int32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_int32
// C=scalar+B GB_bind1st__rdiv_int32
// C=scalar+B' GB_bind1st_tran__rdiv_int32
// C=A+scalar GB_bind2nd__rdiv_int32
// C=A'+scalar GB_bind2nd_tran__rdiv_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_SIGNED (y, x, 32) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT32 || GxB_NO_RDIV_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__rdiv_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__rdiv_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__rdiv_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__rdiv_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__rdiv_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__rdiv_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__rdiv_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__rdiv_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__rdiv_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__rdiv_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = GB_IDIV_SIGNED (y, aij, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 32) ; \
}
GrB_Info GB_bind1st_tran__rdiv_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 32) ; \
}
GrB_Info GB_bind2nd_tran__rdiv_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
operator_tune-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
#define MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
#include <dmlc/base.h>
#include <dmlc/logging.h>
#include <mshadow/base.h>
#include <atomic>
#include <cstdint>
#include <chrono>
#include <thread>
#include <string>
#include <vector>
#include <algorithm>
#include <list>
#include <random>
#include <unordered_set>
// Standard headers for facilities used directly in this file
#include <cctype>   // std::isspace, isdigit
#include <climits>  // INT_MAX
#include <cmath>    // std::fabs
#include <cstdlib>  // std::atoi, std::free
#include <iostream> // std::cout
#include <memory>   // std::unique_ptr
#include <sstream>  // std::istringstream
#include "./mxnet_op.h"
#include "./operator_tune.h"
#if (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && !defined(__mips__)
# define HAS_CXA_DEMANGLE 1
#else
# define HAS_CXA_DEMANGLE 0
#endif
#if HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace mxnet {
namespace op {
#ifndef MXNET_NO_INLINE
#ifdef _MSC_VER
#define MXNET_NO_INLINE __declspec(noinline)
#else
#define MXNET_NO_INLINE __attribute__((noinline))
#endif
#endif // MXNET_NO_INLINE
#define OUTSIDE_COUNT_SHIFT 9
namespace tune {
/*!
* \brief Convert TuningMode value to a string representation
* \param tm Scalar TuningMode value
* \return Character pointer to a string representing the TuningMode value
*/
inline const char *TuningModeToString(const TuningMode tm) {
switch (tm) {
case kAuto:
return "Auto";
case kNeverOMP:
return "NeverOMP";
case kAlwaysOMP:
return "AlwaysOMP";
default:
CHECK(false) << "Unknown TuningMode type: " << static_cast<int>(tm);
return "<unknown>";
}
}
} // namespace tune
/*!
* \brief Engine to tune kernel operations
* \tparam DType Data type to be used when tuning the kernel operations
* \remarks The basic concept here is that we time how long a trivial loop takes with and without
* OMP, subtracting the non-OMP run from the OMP run, which gives us the time
* that the OMP overhead takes. Times were found to be relatively invariant with
* regard to the number of threads/cores on a given machine.
* Secondly, supplied operators are run and timed (for each data type) in order to determine
* their individual time cost.
*
* Knowing the following items, we can determine how long the OMP and non-OMP run
* is expected to take:
* 1) OMP overhead time
* 2) Number of iterations required
* 3) Number of threads to be used if we choose the OMP method
* 4) The data type
*
* Therefore, at Kernel::Launch() time, we can estimate whether it is faster to use OMP or not
* for the given kernel operator.
*
* Results and efficiency of the tuning is tested in the gtest OMP_TUNING test suite
*/
template<typename DType>
class OperatorTune : public OperatorTuneByType<DType> {
public:
using Tick = OperatorTuneBase::Tick;
using duration_t = OperatorTuneBase::duration_t;
using OperatorTuneByType<DType>::tuning_mode_;
/*!
* \brief Constructor
*/
OperatorTune() {
TuneAll();
}
/*!
* \brief Initialize the OperatorTune object
* \return Whether the OperatorTune object was successfully initialized
*/
static bool Initialize() {
if (!initialized_) {
initialized_ = true;
// Generate some random data for calling the operator kernels
data_set_.reserve(0x100);
std::random_device rd;
std::mt19937 gen(rd());
if (!std::is_integral<DType>::value) {
std::uniform_real_distribution<> dis(-1, 1);
for (int n = 0; n < 0x100; ++n) {
const auto val = static_cast<DType>(dis(gen));
// If too close to zero, try again
if (std::fabs(static_cast<double>(val)) < 1e-5) {
--n;
continue;
}
data_set_.emplace_back(val);
}
} else {
std::uniform_int_distribution<> dis(-128, 127);
for (int n = 0; n < 0x100; ++n) {
const auto val = static_cast<DType>(dis(gen));
// If zero, try again
if (!val) {
--n;
continue;
}
data_set_.emplace_back(val);
}
}
// Use this environment variable to generate new tuning statistics
// In order to avoid printing too many copies, only the float32 object prints
output_tuning_data_ = mshadow::DataType<DType>::kFlag == mshadow::kFloat32
&& dmlc::GetEnv("MXNET_OUTPUT_TUNING_DATA", false);
// If outputting tuning data, then also output verbose logging info
OperatorTuneBase::verbose_tuning_info_ = dmlc::GetEnv("MXNET_VERBOSE_TUNING_INFO", false);
OperatorTuneBase::tuning_weight_scale_ = dmlc::GetEnv("MXNET_TUNING_WEIGHT_SCALE", 0.0);
// This isn't actually supposed to be multithreaded init, but just to be sure the change is
// seen everywhere, using atomic bool.
if (!OperatorTuneBase::calculated_.load()) {
// Not especially concerned with a race condition, since this should
// run when only one thread is active (static init), just don't cache this variable
OperatorTuneBase::calculated_.store(true);
OperatorTuneBase::omp_overhead_ns_ = GetOMPLoopOverhead();
std::string config = dmlc::GetEnv("MXNET_USE_OPERATOR_TUNING", std::string());
ParseEnablerConfig(config);
}
if (OperatorTuneBase::verbose_tuning_info_) {
LOG(INFO) << "OMP overhead: " << OperatorTuneBase::omp_overhead_ns_ << " nanoseconds";
}
}
return true;
}
/*!
* \brief Schedule a tuning run
* \tparam OP Operator to tune
* \param tune_func Function to call which tunes the operator
* \return true if the tune operation was scheduled
*/
template<typename OP>
static bool ScheduleTune(void (*tune_func)()) {
#ifdef MXNET_USE_OPERATOR_TUNING
if (tune_func) {
GetTuningList()->push_back(tune_func);
operator_names_.insert(demangle(typeid(OP).name()));
return true;
}
return false;
#else
return true;
#endif
}
/*!
* \brief Is the template parameter type a tuned kernel?
* \tparam OP kernel operator type
* \return true if the operator/kernel is tuned
*/
template<typename OP>
static bool IsTuned() {
return operator_names_.find(demangle(typeid(OP).name())) != operator_names_.end();
}
/*!
* \brief Tune all registered kernel operators that haven't already been tuned
*/
static bool TuneAll() {
Initialize();
std::list<void (*)()> *tl = GetTuningList();
const size_t size_save = tl->size(); // For checking if anything asynchronous is
// adding or removing items, which is forbidden
if (output_tuning_data_ && !tl->empty()) {
// Only emit this once, use the most common case, 'float32'
if (mshadow::DataType<DType>::kFlag == mshadow::kFloat32) {
std::cout << "OperatorTuneBase::duration_t "
<< "OperatorTuneBase::omp_overhead_ns_ = " << OperatorTuneBase::omp_overhead_ns_
<< ";" << std::endl << std::flush;
}
}
const Tick start = std::chrono::high_resolution_clock::now();
for (auto i : *tl) {
(*i)();
}
if (OperatorTuneBase::verbose_tuning_info_) {
const duration_t duration = OperatorTune::GetDurationInNanoseconds(start);
LOG(INFO) << "Op Tuning for " << type_name<DType>()
<< " took " << (duration / 1000000) << " ms";
}
CHECK_EQ(size_save, tl->size()) << "Tuning list size should not have changed while tuning";
tl->clear();
return true;
}
/*!
* \brief Return set of operator names that were registered to be tuned. Does not imply
* that the operator has been tuned.
* \return Set of operator/kernel names that were registered for tuning
*/
static const std::unordered_set<std::string>& TunedOperatorNames() {
return operator_names_;
}
protected:
/*!
* \brief Get the list of tuning function calls for the operators
* \return Pointer to list of tuning function calls
*/
static std::list<void (*)()> *GetTuningList();
/*!
* \brief Demangle typeid::name() in order to generate source macros
* \param name C++ Mangled name
* \return Demangled name as string
*/
static inline std::string demangle(const char *name) {
#if HAS_CXA_DEMANGLE
int status = -4; // some arbitrary value to eliminate the compiler warning
std::unique_ptr<char, void (*)(void *)> res{
abi::__cxa_demangle(name, nullptr, nullptr, &status),
&std::free
};
return status ? name : res.get();
#else
return name;
#endif
}
/*!
* \brief Type name as string
* \tparam T Type
* \return std::string representing the human-readable demangled type name
*/
template<typename T> static inline std::string type_name() {
return demangle(typeid(T).name());
}
/*! \brief Measure OMP overhead for a trivial OMP loop using a given number of threads
* \param omp_thread_count - Number of OMP threads to use in the timing test
* \returns Duration in nanoseconds for the OMP overhead (time to initiate and close the
* OMP session)
*/
static duration_t GetOMPLoopOverhead(const size_t omp_thread_count) {
CHECK_GT(omp_thread_count, 1); // Don't try to use OMP for one thread
int wl_count = OperatorTuneBase::WORKLOAD_COUNT;
Tick start = std::chrono::high_resolution_clock::now();
// Use two loops in order to simulate OMP outside timing
for (size_t i = 0; i < OUTSIDE_COUNT; ++i) {
for (int x = 0; x < wl_count; ++x) {
// trivial operation
volatile_int_ += x;
}
}
const OperatorTuneBase::duration_t no_omp_duration =
OperatorTuneBase::GetDurationInNanoseconds(start);
// Scale OMP iterations by type calculation complexity
double factor;
// if tuning_weight_scale_ is a number that looks valid, use it as the factor
if (OperatorTuneBase::tuning_weight_scale_ > 0.01) {
factor = OperatorTuneBase::tuning_weight_scale_;
} else {
// These are empirically-determined constants found by balancing between
// a desktop (8 & 12 cpu's) and large cloud instances (32 & 64 cpu's)
switch (mshadow::DataType<DType>::kFlag) {
case mshadow::kUint8:
case mshadow::kInt8:
factor = 8.5;
break;
case mshadow::kInt32:
factor = 4.5;
break;
case mshadow::kInt64:
factor = 2;
break;
case mshadow::kFloat64:
factor = 1.25;
break;
case mshadow::kFloat32:
default:
factor = 1.0;
break;
}
}
wl_count = static_cast<int>(factor * OperatorTuneBase::WORKLOAD_COUNT * omp_thread_count);
start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < OUTSIDE_COUNT; ++i) {
#pragma omp parallel for num_threads(omp_thread_count)
for (int x = 0; x < wl_count; ++x) {
// trivial operation
volatile_int_ += x;
}
}
const duration_t omp_duration = OperatorTuneBase::GetDurationInNanoseconds(start)
- no_omp_duration;
return omp_duration >> OUTSIDE_COUNT_SHIFT;
}
/*! \brief Measure OMP overhead for a trivial OMP loop using all cores
* \returns Time in nanoseconds to initialize/cleanup when executing an OMP block
*/
static duration_t GetOMPLoopOverhead() {
// It was found empirically that the OMP overhead was not heavily tied to the number of cores,
// so take the median across a range of thread counts
const auto max_cores = static_cast<size_t>(omp_get_num_procs()) >> 1;
if (max_cores >= 2) {
std::vector<duration_t> core_times;
// Take care of any OMP lazy-init with a throwaway call
for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) {
GetOMPLoopOverhead(omp_threads);
}
std::vector<duration_t> durations;
durations.reserve(max_cores - 1);
for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) {
const duration_t duration = GetOMPLoopOverhead(omp_threads);
if (OperatorTuneBase::verbose_tuning_info_) {
LOG(INFO) << "OMP Thread Count: " << omp_threads << ", overhead: " << duration << " ns";
}
durations.emplace_back(duration);
}
// return median
std::sort(durations.begin(), durations.end());
return durations[durations.size() >> 1];
}
return INT_MAX; // If only one core, then never use OMP (say the overhead is huge)
}
/*!
* \brief Some string utility functions that aren't specific to tuning
*/
struct StringUtil {
/*!
* \brief Trim whitespace from the beginning and end of a string
* \param s String to trim
* \return reference to the modified string. This is the same std::string object as what was
* supplied in the parameters
*/
static std::string &trim(std::string *s) {
s->erase(s->begin(), std::find_if(s->begin(), s->end(), [](int ch) {
return !std::isspace(ch);
}));
s->erase(std::find_if(s->rbegin(), s->rend(), [](int ch) {
return !std::isspace(ch);
}).base(), s->end());
return *s;
}
/*!
* \brief Tokenize a string into a list of tokens
* \param s String to tokenize
* \return std::list of tokens
*/
static std::list<std::string> string2list(const std::string &s) {
std::list<std::string> res;
std::istringstream iss(s);
std::string token;
while (std::getline(iss, token, ',')) {
trim(&token);
if (!token.empty()) {
res.push_back(token);
}
}
return res;
}
};
/*!
* \brief Get data type from string representation
* \warning Do not call from a performance-sensitive area
*/
static int type_from_string(const std::string& type_string) {
if (type_string == "float32")
return mshadow::kFloat32;
if (type_string == "float64")
return mshadow::kFloat64;
if (type_string == "float16")
return mshadow::kFloat16;
if (type_string == "int8")
return mshadow::kInt8;
if (type_string == "uint8")
return mshadow::kUint8;
if (type_string == "int32")
return mshadow::kInt32;
if (type_string == "int64")
return mshadow::kInt64;
return -1; // invalid
}
/*!
* \brief Parse the MXNET_USE_OPERATOR_TUNING environment variable
* \param config String representation of the MXNET_USE_OPERATOR_TUNING environment variable
* Values:
* 0=disable all
* 1=enable all
* comma-separated list of types (e.g. float32,float16)=enable only the listed types; types not listed are disabled
*/
static void ParseEnablerConfig(std::string config) {
StringUtil::trim(&config);
if (!config.empty()) {
// First disable all
OperatorTuneByType<float>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<double>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAlwaysOMP);
// See if it's a non-number (ie type or list of types)
if (!::isdigit(config[0])) {
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
std::list<std::string> tokens = StringUtil::string2list(config);
for (const std::string& stype : tokens) {
// We don't have an enum for half_t
const int typ = type_from_string(stype);
if (typ >= 0) {
switch (typ) {
case mshadow::kFloat32:
OperatorTuneByType<float>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kFloat64:
OperatorTuneByType<double>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kFloat16:
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt8:
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kUint8:
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt32:
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt64:
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto);
break;
default:
CHECK(false) << "Unsupported tuning data type: " << stype;
break;
}
} else {
// -1 is error
LOG(WARNING) << "Unknown data type to be tuned: " << stype;
}
}
} else {
if (std::atoi(config.c_str()) > 0) {
OperatorTuneByType<float>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<double>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
}
}
}
}
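// Illustrative settings (a sketch derived from the parsing above, not from any
// official documentation) for MXNET_USE_OPERATOR_TUNING:
//   "0"              -> tuning disabled; every type stays at kAlwaysOMP
//   "1"              -> auto-tuning (kAuto) enabled for every type
//   "float32,int8"   -> auto-tuning only for the listed types (float16 is also
//                       enabled whenever a type list is given, per the code above)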
/*! \brief Whether this object has been initialized */
static bool initialized_;
/*! \brief Number of passes to obtain an average */
static constexpr duration_t OUTSIDE_COUNT = (1 << OUTSIDE_COUNT_SHIFT);
/*! \brief Random data for timing operator calls */
static std::vector<DType> data_set_;
/*! \brief Operators tuned */
static std::unordered_set<std::string> operator_names_;
/*! \brief Arbitrary object to modify in OMP loop */
static volatile int volatile_int_;
/*! \brief Output insertable (into code) instantiation+default-value macros */
static bool output_tuning_data_;
};
/*!
* \brief Class that tunes unary operators
* \tparam DType Data type to be used when tuning the kernel operations
*/
template<typename DType>
class UnaryOpTune : public OperatorTune<DType> {
protected:
typedef OperatorTune<DType> Super;
using duration_t = typename Super::duration_t;
using Tick = typename Super::Tick;
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take no arguments (ie set_zero)
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetBlankWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res += OP::Map();
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take one argument (ie sqrt())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetUnaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take two arguments (ie elemwise_add())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static inline duration_t GetBinaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF], Super::data_set_[(i + 1) & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take three arguments (ie backwards_grad<elemwise_add>())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetTertiaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF],
Super::data_set_[(i + 1) & 0xFF],
Super::data_set_[i & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for mxnet-like kernels that take no arguments
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetBlankWorkloadEx() {
std::unique_ptr<DType> tmp(new DType[Super::WORKLOAD_COUNT]);
DType *tmp_ptr = tmp.get();
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
OP::Map(i, tmp_ptr);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
public:
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes no arguments
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneBlankOperator() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkload<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes one argument
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneUnaryOperator() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetUnaryWorkload<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes a backward operator which takes one argument
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneUnaryBackwardOperator() {
mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] =
GetBinaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_BWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified "mxnet_op-type" kernel operator.
* Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes no arguments
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneBlankOperatorEx() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkloadEx<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BLANK_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Determine whether to use OMP based upon both timing and configuration using the
* given (templated) operator's workload
* \tparam OP Operator whose workload to use (tuned_op::workload_[0])
* \param N Number of iterations desired
* \param thread_count Number of OMP threads available to perform the iterations
* \returns Whether it's faster to use OMP for these iterations
*/
template<typename OP>
inline static bool UseOMP(size_t N, size_t thread_count) {
return OperatorTune<DType>::UseOMP(N,
thread_count,
static_cast<uint64_t>(N) * OP::workload_[0]);
}
};
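// Illustrative break-even sketch (an assumption, not MXNet's exact formula):
// with a tuned per-element cost of w ns, T OMP threads, and a measured OMP
// overhead of o ns, the serial loop costs roughly N*w while the OMP loop
// costs roughly o + N*w/T, so OMP only pays off once
//     N > o / (w * (1 - 1/T))
// e.g. o = 3000 ns, w = 2 ns, T = 8 gives N > ~1714 elements.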
/*!
* \brief Class that tunes binary and unary operators
* \tparam DType Data type to be used when tuning the kernel operations
*/
template<typename DType>
class BinaryOpTune : public UnaryOpTune<DType> {
protected:
typedef UnaryOpTune<DType> Super;
public:
/*!
* \brief Tune a generic binary operator
* @tparam OP - Operator type
*/
template<typename OP>
static void TuneBinaryOperator() {
mxnet_op::tuned_op<OP, DType>::workload_[0] = Super::template GetBinaryWorkload<OP>();
if (Super::Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BINARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune binary backward operator
* \tparam OP - operator
*/
template<typename OP>
static void TuneBinaryBackwardOperator() {
mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] =
Super::template GetTertiaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>();
if (Super::Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BINARY_WORKLOAD_BWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
};
#undef OUTSIDE_COUNT_SHIFT
#undef WORKLOAD_COUNT_SHIFT
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
|
RCCE_lib_pwr.h | //
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#define RC_GLOBAL_CLOCK_MHZ 1600
#define RPC_ROOT 0
#define RC_NUM_VOLTAGE_DOMAINS 6
#define RC_MIN_VOLTAGE_LEVEL 0
#define RC_MAX_VOLTAGE_LEVEL 6
#define RC_MAX_FREQUENCY_DIVIDER 16 // maximum divider value, so lowest F
#define RC_MIN_FREQUENCY_DIVIDER 2 // minimum divider value, so highest F
#define RC_NUM_FREQUENCY_DIVIDERS (RC_MAX_FREQUENCY_DIVIDER+1)
#define RC_NUM_VOLTAGE_LEVELS 7
#define RPC_PHYSICAL_ADDRESS 0xFB000000
#ifndef COPPERRIDGE
/* real latency is probably closer to 8 800 000 */
#define RC_WAIT_CYCLES 1000000
#endif
#define TILEDIVIDER 0x00000080
#define RC_DEFAULT_VOLTAGE_LEVEL 4
#define RC_DEFAULT_FREQUENCY_DIVIDER 3 // corresponds to tile clock of 1600/3
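// Illustrative arithmetic (assuming tile clock = RC_GLOBAL_CLOCK_MHZ / divider,
// as the comments above suggest): divider 2 -> 800 MHz (fastest),
// divider 3 (default) -> ~533 MHz, divider 16 -> 100 MHz (slowest).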
typedef struct {
float volt;
int VID;
int MHz_cap;
} triple;
int RCCE_set_frequency(int);
static int RC_wait_voltage(RCCE_REQUEST *);
static int RC_set_frequency_divider(int, int);
long long RC_global_clock(void);
unsigned int FID_word(int, int);
unsigned int VID_word(int, int);
#ifdef RC_POWER_MANAGEMENT
#ifndef COPPERRIDGE
typedef struct {
volatile int queue[RC_NUM_VOLTAGE_DOMAINS];
volatile long long start_time;
} RCCE_RPC_REGULATOR;
#define REGULATOR_LENGTH (PAD32byte(sizeof(RCCE_RPC_REGULATOR)))
extern RCCE_RPC_REGULATOR *RCCE_RPC_regulator;
extern long long RC_time_at_birth;
#endif
extern RCCE_COMM RCCE_V_COMM;
extern RCCE_COMM RCCE_F_COMM;
extern RCCE_COMM RCCE_P_COMM;
extern int RCCE_ue_F_masters[4];
extern int RCCE_set_power_active;
extern int RC_current_voltage_level;
extern int RC_current_frequency_divider;
int RCCE_init_RPC(int *, int, int);
#endif
#ifdef _OPENMP
#pragma omp threadprivate (RCCE_ue_F_masters)
#pragma omp threadprivate (RCCE_V_COMM, RCCE_F_COMM, RCCE_P_COMM)
#pragma omp threadprivate (RCCE_set_power_active)
#pragma omp threadprivate (RC_current_voltage_level, RC_current_frequency_divider)
#ifndef COPPERRIDGE
#pragma omp threadprivate (RC_time_at_birth)
#pragma omp threadprivate (RCCE_RPC_regulator)
#endif
#endif
|
floorplan.balance.c | #include "hclib.h"
int ____num_tasks[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
/**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/* Original code from the Application Kernel Matrix by Cray */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <omp.h>
#include "app-desc.h"
#include "bots.h"
#define ROWS 64
#define COLS 64
#define DMAX 64
#define max(a, b) (((a) > (b)) ? (a) : (b))
#define min(a, b) (((a) < (b)) ? (a) : (b))
int solution = -1;
typedef int coor[2];
typedef char ibrd[ROWS * COLS];
typedef char (*pibrd)[COLS];
FILE * inputFile;
struct cell {
int n;
coor *alt;
int top;
int bot;
int lhs;
int rhs;
int left;
int above;
int next;
};
struct cell * gcells;
int MIN_AREA;
ibrd BEST_BOARD;
coor MIN_FOOTPRINT;
int N;
/* compute all possible locations for nw corner for cell */
static int starts(int id, int shape, coor *NWS, struct cell *cells) {
int i, n, top, bot, lhs, rhs;
int rows, cols, left, above;
/* size of cell */
rows = cells[id].alt[shape][0];
cols = cells[id].alt[shape][1];
/* the cells to the left and above */
left = cells[id].left;
above = cells[id].above;
/* if there is a vertical and horizontal dependence */
if ((left >= 0) && (above >= 0)) {
top = cells[above].bot + 1;
lhs = cells[left].rhs + 1;
bot = top + rows;
rhs = lhs + cols;
/* if footprint of cell touches the cells to the left and above */
if ((top <= cells[left].bot) && (bot >= cells[left].top) &&
(lhs <= cells[above].rhs) && (rhs >= cells[above].lhs))
{ n = 1; NWS[0][0] = top; NWS[0][1] = lhs; }
else { n = 0; }
/* if there is only a horizontal dependence */
} else if (left >= 0) {
/* highest initial row is top of cell to the left - rows */
top = max(cells[left].top - rows + 1, 0);
/* lowest initial row is bottom of cell to the left */
bot = min(cells[left].bot, ROWS);
n = bot - top + 1;
for (i = 0; i < n; i++) {
NWS[i][0] = i + top;
NWS[i][1] = cells[left].rhs + 1;
}
} else {
/* leftmost initial col is lhs of cell above - cols */
lhs = max(cells[above].lhs - cols + 1, 0);
/* rightmost initial col is rhs of cell above */
rhs = min(cells[above].rhs, COLS);
n = rhs - lhs + 1;
for (i = 0; i < n; i++) {
NWS[i][0] = cells[above].bot + 1;
NWS[i][1] = i + lhs;
} }
return (n);
}
/* lay the cell down on the board in the rectangular space defined
by the cell's top, bottom, left, and right edges. If the cell
cannot be laid down, return 0; else 1.
*/
static int lay_down(int id, ibrd board, struct cell *cells) {
int i, j, top, bot, lhs, rhs;
top = cells[id].top;
bot = cells[id].bot;
lhs = cells[id].lhs;
rhs = cells[id].rhs;
for (i = top; i <= bot; i++) {
for (j = lhs; j <= rhs; j++) {
if (board[i * COLS + j] == 0) board[i * COLS + j] = (char)id;
else return(0);
} }
return (1);
}
#define read_integer(file,var) if ( fscanf(file, "%d", &var) == EOF ) { bots_message(" Bogus input file\n"); exit(-1); }
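/* Input file layout, as consumed by read_inputs() below: the number of cells,
then for each cell its number of alternative shapes followed by one
(rows, cols) pair per alternative, then the indices of the cells to its left
and above and of the next cell to place; an optional trailing integer gives
the known optimal area that floorplan_verify() checks against. */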
static void read_inputs() {
int i, j, n;
read_integer(inputFile,n);
N = n;
gcells = (struct cell *) malloc((n + 1) * sizeof(struct cell));
gcells[0].n = 0;
gcells[0].alt = 0;
gcells[0].top = 0;
gcells[0].bot = 0;
gcells[0].lhs = -1;
gcells[0].rhs = -1;
gcells[0].left = 0;
gcells[0].above = 0;
gcells[0].next = 0;
for (i = 1; i < n + 1; i++) {
read_integer(inputFile, gcells[i].n);
gcells[i].alt = (coor *) malloc(gcells[i].n * sizeof(coor));
for (j = 0; j < gcells[i].n; j++) {
read_integer(inputFile, gcells[i].alt[j][0]);
read_integer(inputFile, gcells[i].alt[j][1]);
}
read_integer(inputFile, gcells[i].left);
read_integer(inputFile, gcells[i].above);
read_integer(inputFile, gcells[i].next);
}
if (!feof(inputFile)) {
read_integer(inputFile, solution);
}
}
static void write_outputs() {
int i, j;
bots_message("Minimum area = %d\n\n", MIN_AREA);
for (i = 0; i < MIN_FOOTPRINT[0]; i++) {
for (j = 0; j < MIN_FOOTPRINT[1]; j++) {
if (BEST_BOARD[i * COLS + j] == 0) {bots_message(" ");}
else bots_message("%c", 'A' + BEST_BOARD[i * COLS + j] - 1);
}
bots_message("\n");
}
}
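/* add_cell: for cell `id`, try every alternative shape and every feasible
north-west corner returned by starts(), spawning one task per candidate
placement; returns the number of placements examined at this level plus in
the recursive calls, which compute_floorplan() reports as
bots_number_of_tasks. */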
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS, int dummy_level) {
int i, j, nn, area, nnc,nnl;
ibrd board;
coor footprint, NWS[DMAX];
nnc = nnl = 0;
/* for each possible shape */
for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
nn = starts(id, i, NWS, CELLS);
nnl += nn;
/* for all possible locations */
for (j = 0; j < nn; j++) {
#ifdef HCLIB_TASK_UNTIED
#pragma omp task private(board, footprint,area) firstprivate(NWS,i,j,id,nn) shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) untied
#else
#pragma omp task private(board, footprint,area) firstprivate(NWS,i,j,id,nn) shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode)
#endif
;
{ ____num_tasks[omp_get_thread_num()]++;
{
struct cell cells[N+1];
memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
/* extent of shape */
cells[id].top = NWS[j][0];
cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
cells[id].lhs = NWS[j][1];
cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
memcpy(board, BOARD, sizeof(ibrd));
/* if the cell cannot be laid down, prune the search */
if (! lay_down(id, board, cells)) {
bots_debug("Chip %d, shape %d does not fit\n", id, i);
goto _end;
}
/* calculate new footprint of board and area of footprint */
footprint[0] = (FOOTPRINT[0] > cells[id].bot+1) ? FOOTPRINT[0] : cells[id].bot + 1;
footprint[1] = (FOOTPRINT[1] > cells[id].rhs+1) ? FOOTPRINT[1] : cells[id].rhs + 1;
area = footprint[0] * footprint[1];
/* if last cell */
if (cells[id].next == 0) {
/* if area is minimum, update global values */
if (area < MIN_AREA) {
#pragma omp critical
;
if (area < MIN_AREA) {
MIN_AREA = area;
MIN_FOOTPRINT[0] = footprint[0];
MIN_FOOTPRINT[1] = footprint[1];
memcpy(BEST_BOARD, board, sizeof(ibrd));
bots_debug("N %d\n", MIN_AREA);
}
}
/* if area is less than best area */
} else if (area < MIN_AREA) {
#pragma omp atomic
;
nnc += add_cell(cells[id].next, footprint, board,cells, 0);
/* if area is greater than or equal to best area, prune search */
} else {
bots_debug("T %d, %d\n", area, MIN_AREA);
}
_end:;
} ; }
}
}
#pragma omp taskwait
;
return nnc+nnl;
}
ibrd board;
void floorplan_init (char *filename)
{
int i,j;
inputFile = fopen(filename, "r");
if(NULL == inputFile) {
bots_message("Couldn't open %s file for reading\n", filename);
exit(1);
}
/* read input file and initialize global minimum area */
read_inputs();
MIN_AREA = ROWS * COLS;
/* initialize the board to empty */
for (i = 0; i < ROWS; i++)
for (j = 0; j < COLS; j++) board[i * COLS + j] = 0;
}
void compute_floorplan (void)
{
{
coor footprint;
/* footprint of initial board is zero */
footprint[0] = 0;
footprint[1] = 0;
bots_message("Computing floorplan ");
#pragma omp parallel
;
{
#pragma omp single
;
bots_number_of_tasks = add_cell(1, footprint, board, gcells, 0);
}
bots_message(" completed!\n");
} ; {
int __i;
assert(omp_get_max_threads() <= 32);
for (__i = 0; __i < omp_get_max_threads(); __i++) {
fprintf(stderr, "Thread %d: %d\n", __i, ____num_tasks[__i]);
}
}
}
void floorplan_end (void)
{
/* write results */
write_outputs();
exit(0);
}
int floorplan_verify (void)
{
if (solution != -1 )
return MIN_AREA == solution ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
else
return BOTS_RESULT_NA;
}
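#ifdef FLOORPLAN_STANDALONE_SKETCH
/* Minimal driver sketch (illustrative only; the guard macro is hypothetical
and the BOTS harness normally supplies main() and the bots_* helpers used
above). It shows the intended call order of the entry points in this file. */
int main(int argc, char **argv)
{
if (argc < 2) {
fprintf(stderr, "usage: %s <cell description file>\n", argv[0]);
return 1;
}
floorplan_init(argv[1]); /* read the cells and clear the board */
compute_floorplan(); /* task-parallel placement search */
floorplan_end(); /* print the best board found; calls exit(0) */
return 0;
}
#endif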
|
run_dft_omp.c | #include "dft.h"
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define MASTER 0
#define NTHREADS 8
int main(int argc, char* argv[])
{
int_t len = 0, reps = 0;
#ifdef N
len = (int_t) N;
#else
if(argc > 1)
{
len = (int_t) atoi(argv[1]);
}
else
{
fprintf(stderr, "ERROR: N is undefined and the input length is not submitted.\n");
return EXIT_FAILURE;
}
#endif
#ifdef R
reps = (int_t) R;
#else
if(argc > 2)
{
reps = (int_t) atoi(argv[2]);
}
else
{
fprintf(stderr, "ERROR: R is undefined and number of repetitions is not submitted.\n");
return EXIT_FAILURE;
}
#endif
#pragma omp parallel // num_threads(NTHREADS) proc_bind(spread)
{
int tid = omp_get_thread_num();
/* time measurement */
double t1 = 0.0, t2 = 0.0, total1 = 0.0, total2 = 0.0;
if(tid == MASTER)
{
t1 = get_time();
}
struct ft_data_t *data = calloc (1, sizeof(*data));
/*
TODO: implement an "and"-reduction to ensure that all
threads have initialised their data properly
*/
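/* One possible shape for that reduction (illustrative only; the shared flag
and its placement are hypothetical, not part of this program): initialise a
shared int all_ok = 1 before the parallel region, let any thread whose init()
fails clear it, and have every thread read it after a barrier:

int my_ok = (init(data, len) == EXIT_SUCCESS);
if (!my_ok) {
#pragma omp atomic write
all_ok = 0;
}
#pragma omp barrier
if (all_ok) { ... proceed with the DFT ... }
*/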
if(init(data, len) == EXIT_SUCCESS)
{
/* initialising input array with generated data */
setdist_c(data->inout_c, data->len);
if(tid == MASTER)
{
t2 = get_time();
printf("OMP-Version with %d threads\n", omp_get_num_threads());
printf("compiler: %s, problem size: %lu (elements) with real_t = %lu bytes, iterations: %lu\n", COMPILER, (unsigned long) data->len, sizeof(real_t), (unsigned long) reps);
printf("init: %.16f\n", t2 - t1);
#ifdef PRINT_RESULTS
char *f_o = "out/dft_original.txt";
c2r(data->inout_c, data->in_r, data->len);
PRINTR(data->in_r, data->len, f_o);
#endif
total1 = get_time();
}
#pragma omp for
for(int_t i = 0; i < reps; ++i)
{
t1 = get_time();
dft(data->inout_c, data->inout2_c, data->len);
dft(data->inout2_c, data->inout_c, data->len);
norm_c(data->inout_c, data->len, data->len);
t2 = get_time();
}
printf("tid: %lu, time: %.16f\n", (unsigned long)tid, (t2 - t1));
if(tid == MASTER)
{
total2 = get_time();
printf("total dft runtime: %.16f\n", (total2 - total1));
#ifdef PRINT_RESULTS
char *f_r = "out/dft_restored.txt";
c2r(data->inout_c, data->out_r, data->len);
PRINTR(data->out_r, data->len, f_r);
#endif
}
} /* end of init-success */
else
{
fprintf(stderr, "ERROR: thread %d has failed initialisation.\n", tid);
}
/* finalise */
cleanup(data);
free(data);
} /* omp parallel */
return EXIT_SUCCESS;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class NonNullAttr;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPClause;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) LLVM_DELETED_FUNCTION;
void operator=(const Sema &) LLVM_DELETED_FUNCTION;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
static bool
shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) {
// We are about to link these. It is now safe to compute the linkage of
// the new decl. If the new decl has external linkage, we will
// link it with the hidden decl (which also has external linkage) and
// it will keep having external linkage. If it has internal linkage, we
// will not link it. Since it has no previous decls, it will remain
// with internal linkage.
return !Old->isHidden() || New->isExternallyVisible();
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
enum PragmaVtorDispKind {
PVDK_Push, ///< #pragma vtordisp(push, mode)
PVDK_Set, ///< #pragma vtordisp(mode)
PVDK_Pop, ///< #pragma vtordisp(pop)
PVDK_Reset ///< #pragma vtordisp()
};
enum PragmaMsStackAction {
PSK_Reset, // #pragma ()
PSK_Set, // #pragma ("name")
PSK_Push, // #pragma (push[, id])
PSK_Push_Set, // #pragma (push[, id], "name")
PSK_Pop, // #pragma (pop[, id])
PSK_Pop_Set, // #pragma (pop[, id], "name")
};
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
///
/// The stack always has at least one element in it.
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
explicit PragmaStack(const ValueType &Value)
: CurrentValue(Value) {}
SmallVector<Slot, 2> Stack;
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief A mapping from external names to the most recent
/// locally-scoped extern "C" declaration with that name.
///
/// This map contains external declarations introduced in local
/// scopes, e.g.,
///
/// \code
/// extern "C" void f() {
/// void foo(int, int);
/// }
/// \endcode
///
/// Here, the name "foo" will be associated with the declaration of
/// "foo" within f. This name is not visible outside of
/// "f". However, we still find it in two cases:
///
/// - If we are declaring another global or extern "C" entity with
/// the name "foo", we can find "foo" as a previous declaration,
/// so that the types of this external declaration can be checked
/// for compatibility.
///
/// - If we would implicitly declare "foo" (e.g., due to a call to
/// "foo" in C when no prototype or definition is visible), then
/// we find this declaration of "foo" and complain that it is
/// not visible.
llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternCDecls;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding destructors seen during a class definition
/// (there could be multiple due to nested classes) that had their exception
/// spec checks delayed, plus the overridden destructor.
SmallVector<std::pair<const CXXDestructorDecl*,
const CXXDestructorDecl*>, 2>
DelayedDestructorExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::DenseMap<const FunctionDecl *, LateParsedTemplate *>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
LateTemplateParserCB *LateTemplateParser;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP, void *P) {
LateTemplateParser = LTP;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// \brief The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
Sema& S;
bool OldFPContractState : 1;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const {
if (FunctionScopes.empty())
return nullptr;
for (int e = FunctionScopes.size()-1; e >= 0; --e) {
if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
continue;
return FunctionScopes[e];
}
return nullptr;
}
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (!isUnevaluatedContext())
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExtProtoInfo &EPI);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = nullptr,
bool *MissingEmptyExceptionSpecification = nullptr,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
bool Suppressed;
TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template<typename T1>
class BoundTypeDiagnoser1 : public TypeDiagnoser {
unsigned DiagID;
const T1 &Arg1;
public:
BoundTypeDiagnoser1(unsigned DiagID, const T1 &Arg1)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) { }
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
if (Suppressed) return;
S.Diag(Loc, DiagID) << getPrintable(Arg1) << T;
}
virtual ~BoundTypeDiagnoser1() { }
};
template<typename T1, typename T2>
class BoundTypeDiagnoser2 : public TypeDiagnoser {
unsigned DiagID;
const T1 &Arg1;
const T2 &Arg2;
public:
BoundTypeDiagnoser2(unsigned DiagID, const T1 &Arg1,
const T2 &Arg2)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
Arg2(Arg2) { }
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
if (Suppressed) return;
S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << T;
}
virtual ~BoundTypeDiagnoser2() { }
};
template<typename T1, typename T2, typename T3>
class BoundTypeDiagnoser3 : public TypeDiagnoser {
unsigned DiagID;
const T1 &Arg1;
const T2 &Arg2;
const T3 &Arg3;
public:
BoundTypeDiagnoser3(unsigned DiagID, const T1 &Arg1,
const T2 &Arg2, const T3 &Arg3)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
Arg2(Arg2), Arg3(Arg3) { }
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
if (Suppressed) return;
S.Diag(Loc, DiagID)
<< getPrintable(Arg1) << getPrintable(Arg2) << getPrintable(Arg3) << T;
}
virtual ~BoundTypeDiagnoser3() { }
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
public:
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template<typename T1>
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1) {
BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
return RequireCompleteType(Loc, T, Diagnoser);
}
template<typename T1, typename T2>
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
return RequireCompleteType(Loc, T, Diagnoser);
}
template<typename T1, typename T2, typename T3>
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
const T3 &Arg3) {
BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
Arg3);
return RequireCompleteType(Loc, T, Diagnoser);
}
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template<typename T1>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1) {
BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
return RequireCompleteExprType(E, Diagnoser);
}
template<typename T1, typename T2>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
const T2 &Arg2) {
BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
return RequireCompleteExprType(E, Diagnoser);
}
template<typename T1, typename T2, typename T3>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
const T2 &Arg2, const T3 &Arg3) {
BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
Arg3);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template<typename T1>
bool RequireLiteralType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1) {
BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
return RequireLiteralType(Loc, T, Diagnoser);
}
template<typename T1, typename T2>
bool RequireLiteralType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
return RequireLiteralType(Loc, T, Diagnoser);
}
template<typename T1, typename T2, typename T3>
bool RequireLiteralType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
const T3 &Arg3) {
BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
Arg3);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
QualType BuildDecltypeType(Expr *E, SourceLocation Loc);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
SourceLocation NameLoc);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
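/// Illustrative consumer sketch (not part of the original header): the parser
/// is expected to switch on the returned classification. The surrounding
/// parser state (Actions, getCurScope(), Next) is assumed here.
/// \code
///   Sema::NameClassification Classification =
///       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
///                            /*IsAddressOfOperand=*/false);
///   switch (Classification.getKind()) {
///   case Sema::NC_Type:       /* annotate a type token */           break;
///   case Sema::NC_Expression: /* annotate a primary expression */   break;
///   case Sema::NC_Error:      /* abandon the tentative parse */     break;
///   default: break;
///   }
/// \endcode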
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition =
nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo &Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
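/// Illustrative usage sketch (not part of the original header): deciding how
/// a new function declaration relates to a previous lookup result. The names
/// NewFD, Previous, S, and OldDecl are assumed.
/// \code
///   NamedDecl *OldDecl = nullptr;
///   switch (CheckOverload(S, NewFD, Previous, OldDecl,
///                         /*IsForUsingDecl=*/false)) {
///   case Ovl_Overload:    /* add NewFD alongside the previous set */  break;
///   case Ovl_Match:       /* treat NewFD as redeclaring OldDecl */    break;
///   case Ovl_NonFunction: /* NewFD conflicts with a non-function */   break;
///   }
/// \endcode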
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
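/// Illustrative usage sketch (not part of the original header): converting a
/// case label to the switch condition's type. CaseExpr and CondType are
/// assumed names.
/// \code
///   llvm::APSInt CaseVal;
///   ExprResult Converted = CheckConvertedConstantExpression(
///       CaseExpr, CondType, CaseVal, CCEK_CaseValue);
///   if (Converted.isInvalid())
///     return StmtError();
/// \endcode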
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit as a series of 'note's all template and non-template overload
// candidates identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin,
BEF_end
};
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
ParmVarDecl *const *ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
private:
bool CppLookupName(LookupResult &R, Scope *S);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
public:
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
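/// Illustrative usage sketch (not part of the original header): a typical
/// unqualified lookup performed for a redeclaration check; the diagnostic
/// used below is assumed for the example.
/// \code
///   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName,
///                  ForRedeclaration);
///   LookupName(R, S);
///   if (NamedDecl *Prev = R.getAsSingle<NamedDecl>())
///     Diag(NameLoc, diag::err_redefinition) << Name;
/// \endcode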
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
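/// Illustrative usage sketch (not part of the original header): typo-correct
/// an unresolved identifier and report the suggestion. 'Validator' stands for
/// some CorrectionCandidateCallback and the diagnostic ID is assumed.
/// \code
///   if (TypoCorrection Corrected =
///           CorrectTypo(DeclarationNameInfo(Name, NameLoc),
///                       LookupOrdinaryName, S, &SS, Validator,
///                       CTK_ErrorRecovery))
///     diagnoseTypo(Corrected,
///                  PDiag(diag::err_undeclared_var_use_suggest) << Name);
/// \endcode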
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration matches exactly that of its prior declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns that ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has a user-declared setter or getter but
/// not both.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool warn, bool instance);
/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true,
bool IsUnqualifiedLookup = false) {
if (IsUnqualifiedLookup)
(void)UnqualifiedTyposCorrected[Typo];
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false,
bool warn=true) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
warn, /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false,
bool warn=true) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
warn, /*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg(Sema &actions) : E(nullptr) { }
// FIXME: The const_cast here is ugly. RValue references would make this
// much nicer (or we could duplicate a bunch of the move semantics
// emulation code from Ownership.h).
FullExprArg(const FullExprArg& Other) : E(Other.E) {}
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
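/// Illustrative usage sketch (an addition for exposition, not part of the
/// original interface): wrapping a parsed condition as a full-expression.
/// 'SemaRef' and 'CondExpr' are hypothetical in-scope names.
/// \code
///   Sema::FullExprArg Cond = SemaRef.MakeFullExpr(CondExpr);
///   if (Expr *E = Cond.get()) {
///     // E has full-expression semantics (cleanups, etc.) applied.
///   }
/// \endcode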
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
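/// Illustrative usage sketch (exposition only): the RAII object pairs
/// ActOnStartOfCompoundStmt/ActOnFinishOfCompoundStmt automatically.
/// 'SemaRef' is a hypothetical in-scope Sema.
/// \code
///   {
///     Sema::CompoundScopeRAII CompoundScope(SemaRef);
///     // ... act on the statements of the compound statement ...
///   } // ActOnFinishOfCompoundStmt() runs here, even on early exit
/// \endcode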
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler, int HandlerIndex,
int HandlerParentIndex);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr,
Stmt *Block);
StmtResult ActOnSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
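/// For example (illustrative), the stray ';' below leaves the loop with a
/// null body, so the indented statement runs only once, after the loop:
/// \code
///   while (cond);   // suspicious null loop body
///     do_stuff();   // 'PossibleBody': executed once, not per iteration
/// \endcode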
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable };
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
void HandleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
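/// Illustrative sketch (exposition only) of the two-phase pattern these
/// parameters support: probe capturability without side effects, then
/// actually capture. 'Var' and 'Loc' are hypothetical in-scope values.
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Failed = tryCaptureVariable(Var, Loc, TryCapture_Implicit,
///                                    SourceLocation(),
///                                    /*BuildAndDiagnose=*/false,
///                                    CaptureType, DeclRefType,
///                                    /*FunctionScopeIndexToStopAt=*/nullptr);
///   if (!Failed)
///     tryCaptureVariable(Var, Loc); // perform the capture, with diagnostics
/// \endcode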
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
bool HasTrailingLParen;
};
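/// For example (illustrative), with a class exposing a custom operator->:
/// \code
///   struct Data { int x; };
///   struct Handle { Data *operator->(); };
///   void use(Handle h) {
///     h.x; // error: no member 'x' in 'Handle'; member access is re-invoked
///          // as 'h->x' to see whether '.' should have been '->'
///   }
/// \endcode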
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl,
bool HasTrailingLParen);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
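/// ActOnCUDAExecConfigExpr - Handle a CUDA kernel-launch execution
/// configuration, e.g. kernel<<<grid, block>>>(args). Per the CUDA syntax,
/// LLLLoc and GGGLoc are the locations of the '<<<' and '>>>' delimiters.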
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
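/// For example (illustrative), '__builtin_offsetof(T, a.b[2])' decomposes
/// into three components: '.a' and '.b' (isBrackets == false, U.IdentInfo)
/// followed by '[2]' (isBrackets == true, U.E holds the index expression).
/// \code
///   struct Inner { int b[4]; };
///   struct T { Inner a; };
///   unsigned long Off = __builtin_offsetof(T, a.b[2]);
/// \endcode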
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
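/// For example (illustrative), in the Microsoft __if_exists extension:
/// \code
///   struct S { void f(); };
///   void g(S &s) {
///     __if_exists(S::f) { s.f(); }      // IER_Exists: block is compiled
///     __if_not_exists(S::h) { /*...*/ } // S::h yields IER_DoesNotExist
///   }
/// \endcode
/// Within a template, a dependent name yields IER_Dependent and the check
/// is deferred to instantiation.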
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
void getEPI(FunctionProtoType::ExtProtoInfo &EPI) const {
EPI.ExceptionSpecType = getExceptionSpecType();
if (EPI.ExceptionSpecType == EST_Dynamic) {
EPI.NumExceptions = size();
EPI.Exceptions = data();
} else if (EPI.ExceptionSpecType == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
EPI.ExceptionSpecType = EST_ComputedNoexcept;
EPI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
}
FunctionProtoType::ExtProtoInfo getEPI() const {
FunctionProtoType::ExtProtoInfo EPI;
getEPI(EPI);
return EPI;
}
};
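/// Illustrative usage sketch (exposition only): fold in each operation the
/// implicit member would perform, then extract the computed specification.
/// 'SemaRef' is a hypothetical in-scope Sema and 'Callees' a hypothetical
/// collection of methods the implicit member invokes.
/// \code
///   ImplicitExceptionSpecification Spec(SemaRef);
///   for (const CXXMethodDecl *Callee : Callees)
///     Spec.CalledDecl(Loc, Callee);
///   FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
/// \endcode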
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// extended prototype information with the results.
void checkExceptionSpecification(ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExtProtoInfo &EPI);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
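/// Illustrative usage sketch (exposition only): temporarily permit 'this'
/// while processing, e.g., a late-parsed part of a member declaration.
/// 'SemaRef' and 'Method' are hypothetical in-scope values; the qualifiers
/// are left at 0 here for simplicity.
/// \code
///   {
///     Sema::CXXThisScopeRAII ThisScope(SemaRef, Method->getParent(),
///                                      /*CXXThisTypeQuals=*/0);
///     // ... 'this' expressions are allowed and typed against the class ...
///   } // prior CXXThisTypeOverride restored here
/// \endcode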
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
bool IsThrownVarInScope);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Param1,
QualType Param2 = QualType(),
bool addMallocAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult DiagnoseDtorReference(SourceLocation NameLoc, Expr *MemExpr);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType,
bool HasTrailingLParen);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName,
bool HasTrailingLParen);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS,
bool HasTrailingLParen);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
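// Illustrative sketch only (not part of the original header): this overload
// covers a template-id followed by '::' inside a nested-name-specifier, e.g.
// the 'vector<int>::' piece in (with <vector> included)
//
//   std::vector<int>::size_type n = 0;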
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
bool ByRef, IdentifierInfo *Id, Expr *&Init);
/// \brief Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType, IdentifierInfo *Id, Expr *Init);
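// Illustrative sketch only (not part of the original header): an init-capture
// such as
//
//   auto p = std::make_unique<int>(42);             // needs <memory>
//   auto take = [q = std::move(p)] { return *q; };  // needs <utility>
//
// is the construct these two routines cooperate on: the initializer
// 'std::move(p)' is analyzed, and a dummy VarDecl named 'q' is created so
// that later uses of 'q' in the lambda body can be looked up.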
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope,
bool IsInstantiation = false);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
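// Illustrative sketch only (not part of the original header): the conversion
// whose "body" is synthesized here is the one exercised by code such as
//
//   int (*fp)(int) = [](int x) { return x + 1; };   // captureless lambda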
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *" or "NSString *" depending on the type of
/// ValueType, which is allowed to be a built-in numeric type or
/// "char *" or "const char *".
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
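// Illustrative sketch only (not part of the original header): the base and
// member initializers built by these routines come from a constructor's
// ctor-initializer, e.g.
//
//   struct B { explicit B(int); };
//   struct D : B {
//     int n;
//     D(int v) : B(v), n(v) {}   // one base initializer, one member initializer
//   };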
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
typedef LazyVector<CXXRecordDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDynamicClasses, 2, 2>
DynamicClassesType;
/// \brief A list of all of the dynamic classes in this translation
/// unit.
DynamicClassesType DynamicClasses;
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check a parsed base specifier and build the
/// corresponding CXXBaseSpecifier (see ActOnBaseSpecifier below).
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool BasePathInvolvesVirtualBase(const CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
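// Illustrative sketch only (not part of the original header): the covariance
// rule accepts overrides such as
//
//   struct B { virtual B *clone(); };
//   struct D : B { D *clone() override; };   // D* is covariant with B*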
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of the base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
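// Illustrative sketch only (not part of the original header): the check
// rejects an override of a virtual function marked 'final', e.g.
//
//   struct B { virtual void f() final; };
//   struct D : B { void f() override; };   // error: overrides a final function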
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template<typename T1>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
unsigned DiagID,
const T1 &Arg1) {
BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
template<typename T1, typename T2>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
unsigned DiagID,
const T1 &Arg1, const T2 &Arg2) {
BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
template<typename T1, typename T2, typename T3>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
unsigned DiagID,
const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) {
BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
AbstractDiagSelID SelID = AbstractNone);
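// Illustrative sketch only (not part of the original header): the
// abstract-type requirement fires on code such as
//
//   struct Shape { virtual void draw() = 0; };
//   Shape s;        // diagnosed: variable of abstract type (AbstractVariableType)
//   void f(Shape);  // diagnosed: parameter of abstract type (AbstractParamType)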
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
Decl **Params, unsigned NumParams,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
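// Illustrative sketch only (not part of the original header): the dependent
// typename specifier handled here appears in code such as
//
//   template<typename T>
//   typename T::value_type first(const T &c) { return *c.begin(); }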
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
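// Illustrative sketch only (not part of the original header): the kind of
// error these contexts classify is an unexpanded parameter pack, e.g.
//
//   void g(...);
//   template<typename ...Ts>
//   void f(Ts ...args) {
//     g(args);      // error: 'args' is unexpanded here (UPPC_Expression)
//     g(args...);   // OK: the ellipsis expands the pack
//   }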
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
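// Illustrative sketch only (not part of the original header): for example,
// deducing different values for the same parameter yields TDK_Inconsistent:
//
//   template<typename T> void f(T, T);
//   void call() { f(1, 2.0); }   // T deduced as 'int' and as 'double'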
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
QualType OriginalParamType;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
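// Illustrative sketch only (not part of the original header): DeduceAutoType
// performs the deduction behind declarations such as
//
//   auto x = 42;          // deduced as int
//   const auto &r = x;    // deduced as const int&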
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
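// A minimal usage sketch (illustrative only, not taken from the
// implementation): temporarily select element I of the argument packs being
// expanded, and let the destructor restore the previous index.
//
//   {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
//     // ... perform substitutions that should use pack element I ...
//   } // previous ArgumentPackSubstitutionIndex restored here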
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
private:
Sema &SemaRef;
bool Invalid;
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
// FIXME: Replace this with a constructor once we can use delegating
// constructors in llvm.
void Initialize(
ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
};
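// A hedged usage sketch of the RAII pattern described above (variable names
// are illustrative): construct an InstantiatingTemplate to push a record onto
// ActiveTemplateInstantiations, and check isInvalid() to detect that the
// recursion limit was exceeded (the error has already been reported).
//
//   InstantiatingTemplate Inst(SemaRef, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return true; // too many recursive instantiations
//   // ... perform the instantiation; ~InstantiatingTemplate pops the record.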
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-null, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
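// Sketch of how the result is typically interpreted (illustrative only):
// an empty Optional means we are not in a SFINAE context; a present but
// null pointer means a SFINAE context outside of template deduction.
//
//   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
//     // In a SFINAE context; *Info is the nearest deduction-info object,
//     // or null if the context is not tied to deduction.
//   }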
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
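// A minimal sketch of the trap pattern (illustrative): treat errors produced
// while checking a candidate as a substitution failure rather than letting
// them escape as hard errors.
//
//   Sema::SFINAETrap Trap(SemaRef);
//   // ... substitute and check template arguments ...
//   if (Trap.hasErrorOccurred())
//     return /*substitution failure*/ true;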
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
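// Sketch of the intended pattern, assumed from the comment above: analyze a
// construct provisionally while typo correction and immediate-context
// diagnostics are suppressed.
//
//   {
//     Sema::TentativeAnalysisScope Tentative(SemaRef);
//     // ... build/check the construct to see whether it would succeed ...
//   } // typo correction is re-enabled when the scope ends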
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::DenseMap<IdentifierInfo *, TypoCorrection>
UnqualifiedTyposCorrectedMap;
/// \brief A cache containing the results of typo correction for unqualified
/// name lookup.
///
/// The string is the string that we corrected to (which may be empty, if
/// there was no correction), while the boolean will be true when the
/// string represents a keyword.
UnqualifiedTyposCorrectedMap UnqualifiedTyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
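// Illustrative sketch (not a verbatim excerpt): isolate local implicit
// instantiations while instantiating a function body, flush only those, and
// let the destructor restore the instantiations pending for the outer scope.
//
//   SavePendingLocalImplicitInstantiationsRAII SavedPending(*this);
//   // ... instantiate the body, which may queue local instantiations ...
//   PerformPendingInstantiations(/*LocalOnly=*/true);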
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations,
const IdentifierLocPair *ProtocolId,
unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = nullptr,
ObjCContainerDecl *lexicalDC = nullptr);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
PPK_Show, // #pragma pack(show), only supported by MSVC.
PPK_Push, // #pragma pack(push, [identifier], [n])
PPK_Pop // #pragma pack(pop, [identifier], [n])
};
enum PragmaMSStructKind {
PMSST_OFF, // #pragma ms_struct off
PMSST_ON // #pragma ms_struct on
};
enum PragmaMSCommentKind {
PCK_Unknown,
PCK_Linker, // #pragma comment(linker, ...)
PCK_Lib, // #pragma comment(lib, ...)
PCK_Compiler, // #pragma comment(compiler, ...)
PCK_ExeStr, // #pragma comment(exestr, ...)
PCK_User // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
enum PragmaSectionFlag : unsigned {
PSF_None = 0,
PSF_Read = 0x1,
PSF_Write = 0x2,
PSF_Execute = 0x4,
PSF_Implicit = 0x8,
PSF_Invalid = 0x80000000U,
};
struct SectionInfo {
DeclaratorDecl *Decl;
SourceLocation PragmaSectionLocation;
int SectionFlags;
SectionInfo() {}
SectionInfo(DeclaratorDecl *Decl,
SourceLocation PragmaSectionLocation,
int SectionFlags)
: Decl(Decl),
PragmaSectionLocation(PragmaSectionLocation),
SectionFlags(SectionFlags) {}
};
llvm::StringMap<SectionInfo> SectionInfos;
bool UnifySection(const StringRef &SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(const StringRef &SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
OpenMPClauseKind CKind);
public:
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
StmtResult ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind,
const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
unsigned Argument, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation KindLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *
ActOnOpenMPVarListClause(OpenMPClauseKind Kind, ArrayRef<Expr *> Vars,
Expr *TailExpr, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *
ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId);
/// \brief Called on well-formed 'linear' clause.
OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList,
Expr *Step,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
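// A hedged example of a typical call (the condition and variable names are
// illustrative): coerce an integral expression to bool unless it already has
// the right type, merging with any existing implicit cast.
//
//   if (!E->getType()->isBooleanType())
//     E = ImpCastExprToType(E, Context.BoolTy, CK_IntegralToBoolean).get();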
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types that
/// point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
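// Sketch of the intended pairing (illustrative; AA_Assigning is assumed to be
// the appropriate AssignmentAction value): run an assignment-constraint
// check, then let DiagnoseAssignmentResult emit any required diagnostic.
//
//   AssignConvertType ConvTy = CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHS.get()->getType(),
//                                RHS.get(), AA_Assigning))
//     return ExprError();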
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true,
bool DiagnoseCFAudited = false);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = nullptr);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool *NonStandardCompositeType = nullptr) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
NonStandardCompositeType);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(CUDAFunctionTarget CallerTarget,
CUDAFunctionTarget CalleeTarget);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee) {
return CheckCUDATarget(IdentifyCUDATarget(Caller),
IdentifyCUDATarget(Callee));
}
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args,
unsigned NumParams, bool IsMemberFunction, SourceLocation Loc,
SourceRange Range, VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl,
IdentifierInfo *FnInfo);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTWriter;
public:
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSLocalManglingNumber() const {
return CurScope->incrementMSLocalManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
AvailabilityResult getCurContextAvailability() const;
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
#endif
|
convolution-3d.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-3d.h"
/* Array initialization. */
static
void init_array (int ni, int nj, int nk,
DATA_TYPE POLYBENCH_3D_CUDA(A,NI,NJ,NK,ni,nj,nk))
{
int i, j, k;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
for (k = 0; k < nk; k++)
{
//A[i][j][k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
POLYBENCH_3D_REF(A,NI,NJ,NK,i,j,k) = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj, int nk,
DATA_TYPE POLYBENCH_3D_CUDA(B,NI,NJ,NK,ni,nj,nk))
{
int i, j, k;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
for (k = 0; k < nk; k++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, POLYBENCH_3D_REF(B,NI,NJ,NK,i,j,k));
if (((i * NJ + j) * NK + k) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
int nk,
DATA_TYPE POLYBENCH_3D_CUDA(A,NI,NJ,NK,ni,nj,nk),
DATA_TYPE POLYBENCH_3D_CUDA(B,NI,NJ,NK,ni,nj,nk))
{
//int i, j, k;
//#pragma omp parallel
//{
//#pragma omp for collapse(2)
#P1
for (int i = 1; i < _PB_NI - 1; ++i)
for (int j = 1; j < _PB_NJ - 1; ++j)
for (int k = 1; k < _PB_NK - 1; ++k)
{
POLYBENCH_3D_REF(B,NI,NJ,NK,i,j,k)
= 2 * POLYBENCH_3D_REF(A,NI,NJ,NK,i-1,j-1,k-1) + 4 * POLYBENCH_3D_REF(A,NI,NJ,NK,i+1,j-1,k-1)
+ 5 * POLYBENCH_3D_REF(A,NI,NJ,NK,i-1,j-1,k-1) + 7 * POLYBENCH_3D_REF(A,NI,NJ,NK,i+1,j-1,k-1)
+ -8 * POLYBENCH_3D_REF(A,NI,NJ,NK,i-1,j-1,k-1) + 10 * POLYBENCH_3D_REF(A,NI,NJ,NK,i+1,j-1,k-1)
+ -3 * POLYBENCH_3D_REF(A,NI,NJ,NK,i,j-1,k)
+ 6 * POLYBENCH_3D_REF(A,NI,NJ,NK,i,j,k)
+ -9 * POLYBENCH_3D_REF(A,NI,NJ,NK,i,j+1,k)
+ 2 * POLYBENCH_3D_REF(A,NI,NJ,NK,i-1,j-1,k+1) + 4 * POLYBENCH_3D_REF(A,NI,NJ,NK,i+1,j-1,k+1)
+ 5 * POLYBENCH_3D_REF(A,NI,NJ,NK,i+1,j,k+1) + 7 * POLYBENCH_3D_REF(A,NI,NJ,NK,i+1,j,k+1)
+ -8 * POLYBENCH_3D_REF(A,NI,NJ,NK,i-1,j+1,k+1) + 10 * POLYBENCH_3D_REF(A,NI,NJ,NK,i+1,j+1,k+1);
}
//}
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
/* Variable declaration/allocation. */
POLYBENCH_3D_ARRAY_DECL_CUDA(A, DATA_TYPE, NI, NJ, NK, ni, nj, nk);
POLYBENCH_3D_ARRAY_DECL_CUDA(B, DATA_TYPE, NI, NJ, NK, ni, nj, nk);
/* Initialize array(s). */
init_array (ni, nj, nk, POLYBENCH_ARRAY_CUDA(A));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_conv2d (ni, nj, nk, POLYBENCH_ARRAY_CUDA(A), POLYBENCH_ARRAY_CUDA(B));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, nk, POLYBENCH_ARRAY_CUDA(B)));
/* Be clean. */
//POLYBENCH_FREE_ARRAY(A);
//POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
GB_ijsort.c | //------------------------------------------------------------------------------
// GB_ijsort: sort an index array I and remove duplicates
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Sort an index array and remove duplicates. In MATLAB notation:
/*
[I1 I1k] = sort (I) ;
Iduplicate = [(I1 (1:end-1) == I1 (2:end)), false] ;
I2 = I1 (~Iduplicate) ;
I2k = I1k (~Iduplicate) ;
*/
#include "GB_ij.h"
#include "GB_sort.h"
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (W0, ni, sizeof (GrB_Index)) ; \
GB_FREE_MEMORY (W1, ni, sizeof (GrB_Index)) ; \
GB_FREE_MEMORY (I1, ni, sizeof (GrB_Index)) ; \
GB_FREE_MEMORY (I1k, ni, sizeof (GrB_Index)) ; \
}
GrB_Info GB_ijsort
(
const GrB_Index *restrict I, // size ni, where ni > 1 always holds
int64_t *restrict p_ni, // input: size of I; output: # of indices in I2
GrB_Index *restrict *p_I2, // size ni2, where I2 [0..ni2-1]
// contains the sorted indices with duplicates removed.
GrB_Index *restrict *p_I2k, // output array of size ni2
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (I != NULL) ;
ASSERT (p_ni != NULL) ;
ASSERT (p_I2 != NULL) ;
ASSERT (p_I2k != NULL) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GrB_Index *restrict I1 = NULL ;
GrB_Index *restrict I1k = NULL ;
GrB_Index *restrict I2 = NULL ;
GrB_Index *restrict I2k = NULL ;
int64_t *restrict W0 = NULL ;
int64_t *restrict W1 = NULL ;
int64_t ni = *p_ni ;
ASSERT (ni > 1) ;
int64_t *restrict Count = NULL ; // size ntasks+1
int ntasks = 0 ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (ni, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_MALLOC_MEMORY (I1, ni, sizeof (GrB_Index)) ;
GB_MALLOC_MEMORY (I1k, ni, sizeof (GrB_Index)) ;
if (I1 == NULL || I1k == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// copy I into I1 and construct I1k
//--------------------------------------------------------------------------
GB_memcpy (I1, I, ni * sizeof (GrB_Index), nthreads) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < ni ; k++)
{
// the key is selected so that the last duplicate entry comes first in
// the sorted result. It must be adjusted later, so that the kth entry
// has a key equal to k.
I1k [k] = (ni-k) ;
}
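// Illustrative worked example (not part of the original source): with
// I = [4 2 4 1] and ni = 4, the keys above are I1k = [4 3 2 1].  Sorting
// the pairs [I1 I1k] gives I1 = [1 2 4 4] and I1k = [1 3 2 4], so among
// the two duplicates of 4 the one with the smaller key (that is, the last
// occurrence in I) sorts first.  Removing duplicates and mapping the keys
// back with k = ni - I1k yields I2 = [1 2 4] and I2k = [3 1 2].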
//--------------------------------------------------------------------------
// sort [I1 I1k]
//--------------------------------------------------------------------------
if (nthreads == 1)
{
//----------------------------------------------------------------------
// sequential quicksort
//----------------------------------------------------------------------
GB_qsort_2 ((int64_t *) I1, (int64_t *) I1k, ni) ;
}
else
{
//----------------------------------------------------------------------
// parallel mergesort
//----------------------------------------------------------------------
GB_MALLOC_MEMORY (W0, ni, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (W1, ni, sizeof (int64_t)) ;
if (W0 == NULL || W1 == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
GB_msort_2 ((int64_t *) I1, (int64_t *) I1k, W0, W1, ni, nthreads) ;
GB_FREE_MEMORY (W0, ni, sizeof (int64_t)) ;
GB_FREE_MEMORY (W1, ni, sizeof (int64_t)) ;
}
//--------------------------------------------------------------------------
// determine number of tasks to create
//--------------------------------------------------------------------------
ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
ntasks = GB_IMIN (ntasks, ni) ;
ntasks = GB_IMAX (ntasks, 1) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_MALLOC_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
if (Count == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// count unique entries in I1
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t kfirst, klast, my_count = (tid == 0) ? 1 : 0 ;
GB_PARTITION (kfirst, klast, ni, tid, ntasks) ;
for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++)
{
if (I1 [k-1] != I1 [k])
{
my_count++ ;
}
}
Count [tid] = my_count ;
}
GB_cumsum (Count, ntasks, NULL, 1) ;
int64_t ni2 = Count [ntasks] ;
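// For example (illustrative only): with ntasks = 2 and per-task unique
// counts [2 1], GB_cumsum turns Count into [0 2 3], so ni2 = 3, task 0
// starts writing I2 at position 0 and task 1 at position 2 in the
// construction phase below.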
//--------------------------------------------------------------------------
// allocate the result I2
//--------------------------------------------------------------------------
GB_MALLOC_MEMORY (I2 , ni2, sizeof (GrB_Index)) ;
GB_MALLOC_MEMORY (I2k, ni2, sizeof (GrB_Index)) ;
if (I2 == NULL || I2k == NULL)
{
// out of memory
GB_FREE_WORK ;
GB_FREE_MEMORY (I2 , ni2, sizeof (GrB_Index)) ;
GB_FREE_MEMORY (I2k, ni2, sizeof (GrB_Index)) ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// construct the new list I2 from I1, removing duplicates
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t kfirst, klast, k2 = Count [tid] ;
GB_PARTITION (kfirst, klast, ni, tid, ntasks) ;
if (tid == 0)
{
// the first entry in I1 is never a duplicate
I2 [k2] = I1 [0] ;
I2k [k2] = (ni - I1k [0]) ;
k2++ ;
}
for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++)
{
if (I1 [k-1] != I1 [k])
{
I2 [k2] = I1 [k] ;
I2k [k2] = ni - I1k [k] ;
k2++ ;
}
}
}
//--------------------------------------------------------------------------
// check result: compare with single-pass, single-threaded algorithm
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
{
int64_t ni1 = 1 ;
I1k [0] = ni - I1k [0] ;
for (int64_t k = 1 ; k < ni ; k++)
{
if (I1 [ni1-1] != I1 [k])
{
I1 [ni1] = I1 [k] ;
I1k [ni1] = ni - I1k [k] ;
ni1++ ;
}
}
ASSERT (ni1 == ni2) ;
for (int64_t k = 0 ; k < ni1 ; k++)
{
ASSERT (I1 [k] == I2 [k]) ;
ASSERT (I1k [k] == I2k [k]) ;
}
}
#endif
//--------------------------------------------------------------------------
// free workspace and return the new sorted list
//--------------------------------------------------------------------------
GB_FREE_WORK ;
*(p_I2 ) = (GrB_Index *) I2 ;
*(p_I2k) = (GrB_Index *) I2k ;
*(p_ni ) = (int64_t ) ni2 ;
return (GrB_SUCCESS) ;
}
|
GB_binop__div_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint8)
// A*D function (colscale): GB (_AxD__div_uint8)
// D*A function (rowscale): GB (_DxB__div_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint8)
// C=scalar+B GB (_bind1st__div_uint8)
// C=scalar+B' GB (_bind1st_tran__div_uint8)
// C=A+scalar GB (_bind2nd__div_uint8)
// C=A'+scalar GB (_bind2nd_tran__div_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (x, y, 8) ;
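// Illustrative expansion (not part of the generated code): with the macros
// above, the template loop body for one entry reduces to, roughly,
//
//      uint8_t aij = GBX (Ax, pA, A_iso) ;
//      uint8_t bij = GBX (Bx, pB, B_iso) ;
//      Cx [p] = GB_IDIV_UNSIGNED (aij, bij, 8) ;
//
// that is, an unsigned 8-bit division applied entrywise.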
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__div_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__div_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__div_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (x, bij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (aij, y, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ; \
}
GrB_Info GB (_bind1st_tran__div_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ; \
}
GrB_Info GB (_bind2nd_tran__div_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
allocArray.c | /*
Copyright 2009 Sylvain Foret (sylvain.foret@anu.edu.au)
This file is part of Velvet.
Velvet is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Velvet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velvet; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "allocArray.h"
#include "utility.h"
#ifdef DEBUG
#define NB_PAGES_ALLOC 1
#define BLOCKS_ALLOC_SIZE 1
#else
#define NB_PAGES_ALLOC 128
#define BLOCKS_ALLOC_SIZE 128
#endif
#ifndef VBIGASSEMBLY
#define INDEX_LENGTH 32
#else
#define INDEX_LENGTH 64
#endif
struct AllocArrayFreeElement_st
{
AllocArrayFreeElement *next;
ArrayIdx idx;
};
static void initAllocArray(AllocArray * array, size_t elementSize, char * name)
{
array->freeElements = NULL;
array->elementSize = elementSize;
array->blockSize = sysconf (_SC_PAGESIZE) * NB_PAGES_ALLOC;
array->maxElements = array->blockSize / array->elementSize;
array->maxBlocks = BLOCKS_ALLOC_SIZE;
array->blocks = mallocOrExit (array->maxBlocks, void*);
array->blocks[0] = mallocOrExit (array->blockSize, char);
array->currentBlocks = 1;
array->currentElements = 0;
#ifdef DEBUG
array->elementsRecycled = 0;
array->elementsAllocated = 0;
array->name = name;
#endif
#ifdef _OPENMP
array->nbThreads = 0;
#endif
}
AllocArray*
newAllocArray (size_t elementSize, char *name)
{
AllocArray *array;
if (elementSize < sizeof(AllocArrayFreeElement)) {
velvetLog("Elements too small to create an AllocArray!\n");
exit(-1);
}
array = mallocOrExit (1, AllocArray);
initAllocArray(array, elementSize, name);
return array;
}
void
destroyAllocArrayChunks (AllocArray *array)
{
size_t i;
for (i = 0; i < array->currentBlocks; i++)
free (array->blocks[i]);
free (array->blocks);
}
void
destroyAllocArray (AllocArray *array)
{
if (array)
{
destroyAllocArrayChunks(array);
#ifdef DEBUG
velvetLog(">>> Allocation summary for %s\n", array->name);
velvetLog(">>> Alloc'ed %ld bytes\n", array->blockSize * array->currentBlocks);
velvetLog(">>> Alloc'ed %ld elements\n", array->elementsAllocated);
velvetLog(">>> Recycled %ld elements\n", array->elementsRecycled);
#endif
free (array);
}
}
ArrayIdx
allocArrayAllocate (AllocArray *array)
{
if (array->freeElements != NULL)
{
AllocArrayFreeElement *element;
element = array->freeElements;
array->freeElements = element->next;
#ifdef DEBUG
array->elementsRecycled++;
#endif
return element->idx;
}
if (array->currentElements >= array->maxElements)
{
if (array->currentBlocks == array->maxBlocks)
{
array->maxBlocks += BLOCKS_ALLOC_SIZE;
array->blocks = reallocOrExit (array->blocks, array->maxBlocks, void*);
}
array->blocks[array->currentBlocks] = mallocOrExit (array->blockSize, char);
array->currentBlocks++;
array->currentElements = 0;
}
array->currentElements++;
#ifndef VBIGASSEMBLY
if (array->maxElements * (array->currentBlocks - 1) + array->currentElements == UINT32_MAX)
{
#ifdef DEBUG
velvetLog (">>> Reached maximum `%s' addressable with %i bits\n", array->name, INDEX_LENGTH);
#else
velvetLog (">>> Reached maximum elements addressable with %i bits\n", INDEX_LENGTH);
#endif
abort();
}
#endif
#ifdef DEBUG
array->elementsAllocated++;
#endif
return array->maxElements * (array->currentBlocks - 1) + array->currentElements;
}
static void*
allocArrayGetElement (AllocArray *array, ArrayIdx idx)
{
if (idx != NULL_IDX)
{
const ArrayIdx i = idx - 1;
const ArrayIdx blockIdx = i / array->maxElements;
const ArrayIdx elementIdx = i % array->maxElements;
return ((char*)(array->blocks[blockIdx])) + elementIdx * array->elementSize;
}
return NULL;
}
void
allocArrayFree (AllocArray *array, ArrayIdx idx)
{
if (idx != NULL_IDX)
{
AllocArrayFreeElement *freeElem;
freeElem = allocArrayGetElement (array, idx);
freeElem->idx = idx;
freeElem->next = array->freeElements;
array->freeElements = freeElem;
}
}
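/* Minimal usage sketch (illustrative only, not part of Velvet): how the
allocator above is typically driven. It assumes the declarations in
allocArray.h match the definitions in this file; the `SketchElement' type
and the "sketch" name are hypothetical. The element type is deliberately
larger than AllocArrayFreeElement, as newAllocArray requires. */
typedef struct { double payload[4]; } SketchElement;
static void allocArrayUsageSketch(void)
{
AllocArray *pool;
ArrayIdx first, second, recycled;
pool = newAllocArray(sizeof(SketchElement), "sketch");
first = allocArrayAllocate(pool); /* returns a 1-based index; NULL_IDX means "no element" */
second = allocArrayAllocate(pool);
allocArrayFree(pool, first); /* the element goes onto the free list */
recycled = allocArrayAllocate(pool); /* hands back `first' again */
(void) second;
(void) recycled;
destroyAllocArray(pool);
}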
#ifdef _OPENMP
#define BLOCKS_ALLOC_SHIFT 16
static void initAllocArrayArray(AllocArray *array,
void *blocks,
size_t nbBlocks,
size_t elementSize,
char *name,
int nbThreads,
int thisThread)
{
array->freeElements = NULL;
array->elementSize = elementSize;
array->blockSize = (((size_t) 1) << BLOCKS_ALLOC_SHIFT) * elementSize;
array->maxElements = (((size_t) 1) << BLOCKS_ALLOC_SHIFT);
array->maxBlocks = nbBlocks;
array->blocks = blocks;
array->blocks[thisThread] = callocOrExit(array->blockSize, char);
array->currentBlocks = thisThread;
array->currentElements = 0;
array->nbThreads = nbThreads;
#ifdef DEBUG
array->elementsRecycled = 0;
array->elementsAllocated = 0;
array->name = name;
#endif
}
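/* Layout note (descriptive comment, not in the original source): every
per-thread AllocArray shares one global `blocks' table of
(1 << (INDEX_LENGTH - BLOCKS_ALLOC_SHIFT)) slots, each block holding
(1 << BLOCKS_ALLOC_SHIFT) elements. A thread fills only blocks it has
claimed; fresh blocks are claimed from the shared counter kept in the last
(marker) AllocArray, under the critical section in allocArrayArrayAllocate
below. */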
AllocArray *newAllocArrayArray(unsigned int n,
size_t elementSize,
char * name)
{
AllocArray *allocArray;
void **blocks;
size_t nbBlocks;
int i;
allocArray = callocOrExit (n + 1, AllocArray);
nbBlocks = (((size_t) 1) << (INDEX_LENGTH - BLOCKS_ALLOC_SHIFT));
blocks = callocOrExit(nbBlocks, void*);
for (i = 0; i < n; i++)
initAllocArrayArray(allocArray + i,
blocks,
nbBlocks,
elementSize,
name,
n, i);
/* Last element marker */
allocArray[n].currentBlocks = n;
return allocArray;
}
void destroyAllocArrayArray(AllocArray *allocArray)
{
int i;
for (i = 0; i < allocArray[0].maxBlocks; i++)
if (allocArray[0].blocks[i] != NULL)
free(allocArray[0].blocks[i]);
free(allocArray[0].blocks);
free(allocArray);
}
ArrayIdx allocArrayArrayAllocate(AllocArray *array)
{
int thread = omp_get_thread_num();
AllocArray * lastArray = array + array->nbThreads;
array += thread;
if (array->freeElements != NULL)
{
AllocArrayFreeElement *element;
element = array->freeElements;
array->freeElements = element->next;
#ifdef DEBUG
array->elementsRecycled++;
#endif
return element->idx;
}
if (array->currentElements >= array->maxElements)
{
#pragma omp critical
array->currentBlocks = lastArray->currentBlocks++;
if (array->currentBlocks >= array->maxBlocks)
{
#ifdef DEBUG
velvetLog(">>> Reached maximum `%s' addressable with %i bits\n",
array->name, INDEX_LENGTH);
#else
velvetLog(">>> Reached maximum elements addressable with %i bits\n", INDEX_LENGTH);
#endif
abort();
}
array->blocks[array->currentBlocks] = callocOrExit(array->blockSize, char);
array->currentElements = 0;
}
array->currentElements++;
#ifdef DEBUG
array->elementsAllocated++;
#endif
return array->maxElements * array->currentBlocks + array->currentElements;
}
void
allocArrayArrayFree(AllocArray *array, ArrayIdx idx)
{
if (idx != NULL_IDX)
{
AllocArrayFreeElement *freeElem;
freeElem = allocArrayGetElement(array, idx);
freeElem->idx = idx;
freeElem->next = array->freeElements;
array->freeElements = freeElem;
}
}
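/* Editorial usage sketch (not part of Velvet): each thread owns one AllocArray
   header but all headers share a single block table, so an index returned by
   one thread can be resolved by any other.  DemoElem and demoAllocArrayArray
   are hypothetical names used only for illustration. */
typedef struct {
	ArrayIdx next;
	double value;
} DemoElem;
static void demoAllocArrayArray(void)
{
	AllocArray *pool = newAllocArrayArray(omp_get_max_threads(),
					      sizeof(DemoElem), "demo");
	#pragma omp parallel
	{
		/* Allocate through this thread's header; the returned index is
		   global and can be decoded with allocArrayGetElement. */
		ArrayIdx idx = allocArrayArrayAllocate(pool);
		DemoElem *elem = allocArrayGetElement(pool, idx);
		elem->value = (double) omp_get_thread_num();
	}
	destroyAllocArrayArray(pool);
}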
#endif
|
GB_unaryop__abs_int32_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_fp64
// op(A') function: GB_tran__abs_int32_fp64
// C type: int32_t
// A type: double
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int32_t z ; GB_CAST_SIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
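// Editorial note (not auto-generated): for a single entry p, GB_CAST_OP (p,p)
// expands via the macros above to roughly
//      double aij = Ax [p] ;
//      int32_t x ; GB_CAST_SIGNED (x, aij, 32) ;
//      Cx [p] = GB_IABS (x) ;
// i.e. the double entry is cast to a signed 32-bit integer and its absolute
// value is stored in C.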
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int32_fp64
(
int32_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int32_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_bool_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_int16)
// op(A') function: GB (_unop_tran__identity_bool_int16)
// C type: bool
// A type: int16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_bool_int16)
(
bool *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
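// Editorial note: in the bitmap case above only entries whose Ab [p] bit is
// set are cast and written.  For example, with Ab = {1, 0, 1}, Cx [0] and
// Cx [2] receive (bool) Ax [0] and (bool) Ax [2], while Cx [1] is skipped;
// its absence is already recorded in C->b, which the caller memcpy'd from
// A->b.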
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_bool_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
thread-limit-2.c | /* { dg-do run } */
/* { dg-set-target-env-var OMP_THREAD_LIMIT "9" } */
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
int
main ()
{
if (omp_get_thread_limit () != 9)
return 0;
omp_set_dynamic (0);
#pragma omp parallel num_threads (8)
if (omp_get_num_threads () != 8)
abort ();
#pragma omp parallel num_threads (16)
if (omp_get_num_threads () > 9)
abort ();
#pragma omp target if (0)
#pragma omp teams thread_limit (6)
{
if (omp_get_thread_limit () > 6)
abort ();
if (omp_get_thread_limit () == 6)
{
omp_set_dynamic (0);
omp_set_nested (1);
#pragma omp parallel num_threads (3)
if (omp_get_num_threads () != 3)
abort ();
#pragma omp parallel num_threads (3)
if (omp_get_num_threads () != 3)
abort ();
#pragma omp parallel num_threads (8)
if (omp_get_num_threads () > 6)
abort ();
#pragma omp parallel num_threads (6)
if (omp_get_num_threads () != 6)
abort ();
int cnt = 0;
#pragma omp parallel num_threads (5)
#pragma omp parallel num_threads (5)
#pragma omp parallel num_threads (2)
{
int v;
#pragma omp atomic capture
v = ++cnt;
if (v > 6)
abort ();
usleep (10000);
#pragma omp atomic
--cnt;
}
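/* Editorial note: thread_limit (6) caps the whole contention group, so even
   though the nested parallel regions above request 5 * 5 * 2 threads, at most
   6 can exist at any moment.  Each thread increments cnt, sleeps, then
   decrements it, so the captured value v can never exceed 6.  */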
}
}
return 0;
}
|
ipa-fnsummary.c | /* Function summary pass.
Copyright (C) 2003-2020 Free Software Foundation, Inc.
Contributed by Jan Hubicka
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Analysis of function bodies used by inter-procedural passes
We estimate for each function
- function body size and size after specializing into given context
- average function execution time in a given context
- function frame size
For each call
- call statement size, time and how often the parameters change
ipa_fn_summary data structures store the above information locally (i.e.
parameters of the function itself) and globally (i.e. parameters of
the function created by applying all the inline decisions already
present in the callgraph).
We provide access to the ipa_fn_summary data structure and
basic logic updating the parameters when inlining is performed.
The summaries are context sensitive. Context means
1) partial assignment of known constant values of operands
2) whether function is inlined into the call or not.
It is easy to add more variants. To represent function size and time
that depends on context (i.e. it is known to be optimized away when
context is known either by inlining or from IP-CP and cloning),
we use predicates.
estimate_edge_size_and_time can be used to query
function size/time in the given context. ipa_merge_fn_summary_after_inlining merges
properties of caller and callee after inlining.
Finally pass_inline_parameters is exported. This is used to drive
computation of function parameters used by the early inliner. IPA
inliner performs analysis via its analyze_function method. */
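/* Editorial illustration (not part of the original comment): a statement that
   is only reachable under "if (param0 == 4)" is recorded in the size/time
   table guarded by the predicate "op0 == 4".  When IPA-CP later creates a
   clone in which param0 is known to differ from 4, that entry remaps to false
   and its size and time are dropped from the clone's estimate.  */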
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-streamer.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-inline.h"
#include "gimple-pretty-print.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "ipa-utils.h"
#include "cfgexpand.h"
#include "gimplify.h"
#include "stringpool.h"
#include "attribs.h"
#include "tree-into-ssa.h"
/* Summaries. */
fast_function_summary <ipa_fn_summary *, va_gc> *ipa_fn_summaries;
fast_function_summary <ipa_size_summary *, va_heap> *ipa_size_summaries;
fast_call_summary <ipa_call_summary *, va_heap> *ipa_call_summaries;
/* Edge predicates go here. */
static object_allocator<predicate> edge_predicate_pool ("edge predicates");
/* Dump IPA hints. */
void
ipa_dump_hints (FILE *f, ipa_hints hints)
{
if (!hints)
return;
fprintf (f, "IPA hints:");
if (hints & INLINE_HINT_indirect_call)
{
hints &= ~INLINE_HINT_indirect_call;
fprintf (f, " indirect_call");
}
if (hints & INLINE_HINT_loop_iterations)
{
hints &= ~INLINE_HINT_loop_iterations;
fprintf (f, " loop_iterations");
}
if (hints & INLINE_HINT_loop_stride)
{
hints &= ~INLINE_HINT_loop_stride;
fprintf (f, " loop_stride");
}
if (hints & INLINE_HINT_same_scc)
{
hints &= ~INLINE_HINT_same_scc;
fprintf (f, " same_scc");
}
if (hints & INLINE_HINT_in_scc)
{
hints &= ~INLINE_HINT_in_scc;
fprintf (f, " in_scc");
}
if (hints & INLINE_HINT_cross_module)
{
hints &= ~INLINE_HINT_cross_module;
fprintf (f, " cross_module");
}
if (hints & INLINE_HINT_declared_inline)
{
hints &= ~INLINE_HINT_declared_inline;
fprintf (f, " declared_inline");
}
if (hints & INLINE_HINT_known_hot)
{
hints &= ~INLINE_HINT_known_hot;
fprintf (f, " known_hot");
}
gcc_assert (!hints);
}
/* Record SIZE and TIME to SUMMARY.
The accounted code will be executed when EXEC_PRED is true.
When NONCONST_PRED is false the code will evaluate to constant and
will get optimized out in specialized clones of the function.
If CALL is true account to call_size_time_table rather than
size_time_table. */
void
ipa_fn_summary::account_size_time (int size, sreal time,
const predicate &exec_pred,
const predicate &nonconst_pred_in,
bool call)
{
size_time_entry *e;
bool found = false;
int i;
predicate nonconst_pred;
vec<size_time_entry, va_gc> *table = call
? call_size_time_table : size_time_table;
if (exec_pred == false)
return;
nonconst_pred = nonconst_pred_in & exec_pred;
if (nonconst_pred == false)
return;
/* We need to create initial empty unconditional clause, but otherwise
we don't need to account empty times and sizes. */
if (!size && time == 0 && table)
return;
/* Only for calls do we unaccount what we previously recorded. */
gcc_checking_assert (time >= 0 || call);
for (i = 0; vec_safe_iterate (table, i, &e); i++)
if (e->exec_predicate == exec_pred
&& e->nonconst_predicate == nonconst_pred)
{
found = true;
break;
}
if (i == max_size_time_table_size)
{
i = 0;
found = true;
e = &(*table)[0];
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"\t\tReached limit on number of entries, "
"ignoring the predicate.");
}
if (dump_file && (dump_flags & TDF_DETAILS) && (time != 0 || size))
{
fprintf (dump_file,
"\t\tAccounting size:%3.2f, time:%3.2f on %spredicate exec:",
((double) size) / ipa_fn_summary::size_scale,
(time.to_double ()), found ? "" : "new ");
exec_pred.dump (dump_file, conds, 0);
if (exec_pred != nonconst_pred)
{
fprintf (dump_file, " nonconst:");
nonconst_pred.dump (dump_file, conds);
}
else
fprintf (dump_file, "\n");
}
if (!found)
{
class size_time_entry new_entry;
new_entry.size = size;
new_entry.time = time;
new_entry.exec_predicate = exec_pred;
new_entry.nonconst_predicate = nonconst_pred;
if (call)
vec_safe_push (call_size_time_table, new_entry);
else
vec_safe_push (size_time_table, new_entry);
}
else
{
e->size += size;
e->time += time;
/* FIXME: PR bootstrap/92653 gcc_checking_assert (e->time >= -1); */
/* Tolerate small roundoff issues. */
if (e->time < 0)
e->time = 0;
}
}
/* We proved E to be unreachable, redirect it to __builtin_unreachable. */
static struct cgraph_edge *
redirect_to_unreachable (struct cgraph_edge *e)
{
struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
struct cgraph_node *target = cgraph_node::get_create
(builtin_decl_implicit (BUILT_IN_UNREACHABLE));
if (e->speculative)
e = cgraph_edge::resolve_speculation (e, target->decl);
else if (!e->callee)
e = cgraph_edge::make_direct (e, target);
else
e->redirect_callee (target);
class ipa_call_summary *es = ipa_call_summaries->get (e);
e->inline_failed = CIF_UNREACHABLE;
e->count = profile_count::zero ();
es->call_stmt_size = 0;
es->call_stmt_time = 0;
if (callee)
callee->remove_symbol_and_inline_clones ();
return e;
}
/* Set predicate for edge E. */
static void
edge_set_predicate (struct cgraph_edge *e, predicate *predicate)
{
/* If the edge is determined to be never executed, redirect it
to BUILTIN_UNREACHABLE to make it clear to IPA passes the call will
be optimized out. */
if (predicate && *predicate == false
/* When handling speculative edges, we need to do the redirection
just once. Do it always on the direct edge, so we do not
attempt to resolve speculation while duplicating the edge. */
&& (!e->speculative || e->callee))
e = redirect_to_unreachable (e);
class ipa_call_summary *es = ipa_call_summaries->get (e);
if (predicate && *predicate != true)
{
if (!es->predicate)
es->predicate = edge_predicate_pool.allocate ();
*es->predicate = *predicate;
}
else
{
if (es->predicate)
edge_predicate_pool.remove (es->predicate);
es->predicate = NULL;
}
}
/* Set predicate for hint *P. */
static void
set_hint_predicate (predicate **p, predicate new_predicate)
{
if (new_predicate == false || new_predicate == true)
{
if (*p)
edge_predicate_pool.remove (*p);
*p = NULL;
}
else
{
if (!*p)
*p = edge_predicate_pool.allocate ();
**p = new_predicate;
}
}
/* Compute what conditions may or may not hold given information about
parameters. RET_CLAUSE returns truths that may hold in a specialized copy,
while RET_NONSPEC_CLAUSE returns truths that may hold in a nonspecialized
copy when called in a given context. It is a bitmask of conditions. A zero
bit means that the condition is known to be false, while a set bit means that
the condition may or may not be true. These differ - for example the NOT_INLINED condition
is always false in the second and also builtin_constant_p tests cannot use
the fact that parameter is indeed a constant.
KNOWN_VALS is partial mapping of parameters of NODE to constant values.
KNOWN_AGGS is a vector of aggregate known offset/value set for each
parameter. Return clause of possible truths. When INLINE_P is true, assume
that we are inlining.
ERROR_MARK means compile time invariant. */
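/* Editorial example: with conds[0] = "op0 == 4" and conds[1] = "op0 changed",
   a call site passing the constant 4 yields a specialized clause whose bit
   for conds[0] is set (the comparison may hold) and whose bit for conds[1] is
   clear (the parameter is known not to change in the specialized copy), while
   the nonspecialized clause keeps both bits set because a nonspecialized copy
   cannot rely on the constant.  */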
static void
evaluate_conditions_for_known_args (struct cgraph_node *node,
bool inline_p,
vec<tree> known_vals,
vec<value_range> known_value_ranges,
vec<ipa_agg_value_set> known_aggs,
clause_t *ret_clause,
clause_t *ret_nonspec_clause)
{
clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
class ipa_fn_summary *info = ipa_fn_summaries->get (node);
int i;
struct condition *c;
for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
{
tree val = NULL;
tree res;
int j;
struct expr_eval_op *op;
/* We allow call stmt to have fewer arguments than the callee function
(especially for K&R style programs). So bound check here (we assume
known_aggs vector, if non-NULL, has the same length as
known_vals). */
gcc_checking_assert (!known_aggs.length () || !known_vals.length ()
|| (known_vals.length () == known_aggs.length ()));
if (c->agg_contents)
{
struct ipa_agg_value_set *agg;
if (c->code == predicate::changed
&& !c->by_ref
&& c->operand_num < (int)known_vals.length ()
&& (known_vals[c->operand_num] == error_mark_node))
continue;
if (c->operand_num < (int)known_aggs.length ())
{
agg = &known_aggs[c->operand_num];
val = ipa_find_agg_cst_for_param (agg,
c->operand_num
< (int) known_vals.length ()
? known_vals[c->operand_num]
: NULL,
c->offset, c->by_ref);
}
else
val = NULL_TREE;
}
else if (c->operand_num < (int) known_vals.length ())
{
val = known_vals[c->operand_num];
if (val == error_mark_node && c->code != predicate::changed)
val = NULL_TREE;
}
if (!val
&& (c->code == predicate::changed
|| c->code == predicate::is_not_constant))
{
clause |= 1 << (i + predicate::first_dynamic_condition);
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
continue;
}
if (c->code == predicate::changed)
{
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
continue;
}
if (c->code == predicate::is_not_constant)
{
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
continue;
}
if (val && TYPE_SIZE (c->type) == TYPE_SIZE (TREE_TYPE (val)))
{
if (c->type != TREE_TYPE (val))
val = fold_unary (VIEW_CONVERT_EXPR, c->type, val);
for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
{
if (!val)
break;
if (!op->val[0])
val = fold_unary (op->code, op->type, val);
else if (!op->val[1])
val = fold_binary (op->code, op->type,
op->index ? op->val[0] : val,
op->index ? val : op->val[0]);
else if (op->index == 0)
val = fold_ternary (op->code, op->type,
val, op->val[0], op->val[1]);
else if (op->index == 1)
val = fold_ternary (op->code, op->type,
op->val[0], val, op->val[1]);
else if (op->index == 2)
val = fold_ternary (op->code, op->type,
op->val[0], op->val[1], val);
else
val = NULL_TREE;
}
res = val
? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
: NULL;
if (res && integer_zerop (res))
continue;
if (res && integer_onep (res))
{
clause |= 1 << (i + predicate::first_dynamic_condition);
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
continue;
}
}
if (c->operand_num < (int) known_value_ranges.length ()
&& !c->agg_contents
&& !known_value_ranges[c->operand_num].undefined_p ()
&& !known_value_ranges[c->operand_num].varying_p ()
&& TYPE_SIZE (c->type)
== TYPE_SIZE (known_value_ranges[c->operand_num].type ())
&& (!val || TREE_CODE (val) != INTEGER_CST))
{
value_range vr = known_value_ranges[c->operand_num];
if (!useless_type_conversion_p (c->type, vr.type ()))
{
value_range res;
range_fold_unary_expr (&res, NOP_EXPR,
c->type, &vr, vr.type ());
vr = res;
}
tree type = c->type;
for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
{
if (vr.varying_p () || vr.undefined_p ())
break;
value_range res;
if (!op->val[0])
range_fold_unary_expr (&res, op->code, op->type, &vr, type);
else if (!op->val[1])
{
value_range op0 (op->val[0], op->val[0]);
range_fold_binary_expr (&res, op->code, op->type,
op->index ? &op0 : &vr,
op->index ? &vr : &op0);
}
else
gcc_unreachable ();
type = op->type;
vr = res;
}
if (!vr.varying_p () && !vr.undefined_p ())
{
value_range res;
value_range val_vr (c->val, c->val);
range_fold_binary_expr (&res, c->code, boolean_type_node,
&vr,
&val_vr);
if (res.zero_p ())
continue;
}
}
clause |= 1 << (i + predicate::first_dynamic_condition);
nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
}
*ret_clause = clause;
if (ret_nonspec_clause)
*ret_nonspec_clause = nonspec_clause;
}
/* Return true if VRP will be executed on the function.
We do not want to anticipate optimizations that will not happen.
FIXME: This can be confused with -fdisable and debug counters and thus
it should not be used for correctness (only to make heuristics work).
This means that inliner should do its own optimizations of expressions
that it predicts to be constant so wrong code can not be triggered by
builtin_constant_p. */
static bool
vrp_will_run_p (struct cgraph_node *node)
{
return (opt_for_fn (node->decl, optimize)
&& !opt_for_fn (node->decl, optimize_debug)
&& opt_for_fn (node->decl, flag_tree_vrp));
}
/* Similarly about FRE. */
static bool
fre_will_run_p (struct cgraph_node *node)
{
return (opt_for_fn (node->decl, optimize)
&& !opt_for_fn (node->decl, optimize_debug)
&& opt_for_fn (node->decl, flag_tree_fre));
}
/* Work out what conditions might be true at invocation of E.
Compute costs for inlined edge if INLINE_P is true.
Return in CLAUSE_PTR the evaluated conditions and in NONSPEC_CLAUSE_PTR
(if non-NULL) conditions evaluated for nonspecialized clone called
in a given context.
KNOWN_VALS_PTR and KNOWN_AGGS_PTR must be non-NULL and will be filled by
known constant and aggregate values of parameters.
KNOWN_CONTEXT_PTR, if non-NULL, will be filled by polymorphic call contexts
of parameter used by a polymorphic call. */
void
evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
clause_t *clause_ptr,
clause_t *nonspec_clause_ptr,
vec<tree> *known_vals_ptr,
vec<ipa_polymorphic_call_context>
*known_contexts_ptr,
vec<ipa_agg_value_set> *known_aggs_ptr)
{
struct cgraph_node *callee = e->callee->ultimate_alias_target ();
class ipa_fn_summary *info = ipa_fn_summaries->get (callee);
auto_vec<value_range, 32> known_value_ranges;
class ipa_edge_args *args;
if (clause_ptr)
*clause_ptr = inline_p ? 0 : 1 << predicate::not_inlined_condition;
if (ipa_node_params_sum
&& !e->call_stmt_cannot_inline_p
&& (info->conds || known_contexts_ptr)
&& (args = IPA_EDGE_REF (e)) != NULL)
{
struct cgraph_node *caller;
class ipa_node_params *caller_parms_info, *callee_pi = NULL;
class ipa_call_summary *es = ipa_call_summaries->get (e);
int i, count = ipa_get_cs_argument_count (args);
if (count)
{
if (e->caller->inlined_to)
caller = e->caller->inlined_to;
else
caller = e->caller;
caller_parms_info = IPA_NODE_REF (caller);
callee_pi = IPA_NODE_REF (callee);
/* Watch for thunks. */
if (callee_pi)
/* Watch for variadic functions. */
count = MIN (count, ipa_get_param_count (callee_pi));
}
if (callee_pi)
for (i = 0; i < count; i++)
{
struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
if (ipa_is_param_used_by_indirect_call (callee_pi, i)
|| ipa_is_param_used_by_ipa_predicates (callee_pi, i))
{
/* Determine if we know constant value of the parameter. */
tree cst = ipa_value_from_jfunc (caller_parms_info, jf,
ipa_get_type (callee_pi, i));
if (!cst && e->call_stmt
&& i < (int)gimple_call_num_args (e->call_stmt))
{
cst = gimple_call_arg (e->call_stmt, i);
if (!is_gimple_min_invariant (cst))
cst = NULL;
}
if (cst)
{
gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
if (!known_vals_ptr->length ())
vec_safe_grow_cleared (known_vals_ptr, count);
(*known_vals_ptr)[i] = cst;
}
else if (inline_p && !es->param[i].change_prob)
{
if (!known_vals_ptr->length ())
vec_safe_grow_cleared (known_vals_ptr, count);
(*known_vals_ptr)[i] = error_mark_node;
}
/* If we failed to get simple constant, try value range. */
if ((!cst || TREE_CODE (cst) != INTEGER_CST)
&& vrp_will_run_p (caller)
&& ipa_is_param_used_by_ipa_predicates (callee_pi, i))
{
value_range vr
= ipa_value_range_from_jfunc (caller_parms_info, e, jf,
ipa_get_type (callee_pi,
i));
if (!vr.undefined_p () && !vr.varying_p ())
{
if (!known_value_ranges.length ())
known_value_ranges.safe_grow_cleared (count);
known_value_ranges[i] = vr;
}
}
/* Determine known aggregate values. */
if (fre_will_run_p (caller))
{
ipa_agg_value_set agg
= ipa_agg_value_set_from_jfunc (caller_parms_info,
caller, &jf->agg);
if (agg.items.length ())
{
if (!known_aggs_ptr->length ())
vec_safe_grow_cleared (known_aggs_ptr, count);
(*known_aggs_ptr)[i] = agg;
}
}
}
/* For calls used in polymorphic calls we further determine
polymorphic call context. */
if (known_contexts_ptr
&& ipa_is_param_used_by_polymorphic_call (callee_pi, i))
{
ipa_polymorphic_call_context
ctx = ipa_context_from_jfunc (caller_parms_info, e, i, jf);
if (!ctx.useless_p ())
{
if (!known_contexts_ptr->length ())
known_contexts_ptr->safe_grow_cleared (count);
(*known_contexts_ptr)[i]
= ipa_context_from_jfunc (caller_parms_info, e, i, jf);
}
}
}
else
gcc_assert (!count || callee->thunk.thunk_p);
}
else if (e->call_stmt && !e->call_stmt_cannot_inline_p && info->conds)
{
int i, count = (int)gimple_call_num_args (e->call_stmt);
for (i = 0; i < count; i++)
{
tree cst = gimple_call_arg (e->call_stmt, i);
if (!is_gimple_min_invariant (cst))
cst = NULL;
if (cst)
{
if (!known_vals_ptr->length ())
vec_safe_grow_cleared (known_vals_ptr, count);
(*known_vals_ptr)[i] = cst;
}
}
}
evaluate_conditions_for_known_args (callee, inline_p,
*known_vals_ptr,
known_value_ranges,
*known_aggs_ptr,
clause_ptr,
nonspec_clause_ptr);
}
/* Allocate the function summary. */
static void
ipa_fn_summary_alloc (void)
{
gcc_checking_assert (!ipa_fn_summaries);
ipa_size_summaries = new ipa_size_summary_t (symtab);
ipa_fn_summaries = ipa_fn_summary_t::create_ggc (symtab);
ipa_call_summaries = new ipa_call_summary_t (symtab);
}
ipa_call_summary::~ipa_call_summary ()
{
if (predicate)
edge_predicate_pool.remove (predicate);
param.release ();
}
ipa_fn_summary::~ipa_fn_summary ()
{
if (loop_iterations)
edge_predicate_pool.remove (loop_iterations);
if (loop_stride)
edge_predicate_pool.remove (loop_stride);
vec_free (conds);
vec_free (size_time_table);
vec_free (call_size_time_table);
}
void
ipa_fn_summary_t::remove_callees (cgraph_node *node)
{
cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
ipa_call_summaries->remove (e);
for (e = node->indirect_calls; e; e = e->next_callee)
ipa_call_summaries->remove (e);
}
/* Same as remap_predicate_after_duplication but handle hint predicate *P.
Additionally care about allocating new memory slot for updated predicate
and set it to NULL when it becomes true or false (and thus uninteresting).
*/
static void
remap_hint_predicate_after_duplication (predicate **p,
clause_t possible_truths)
{
predicate new_predicate;
if (!*p)
return;
new_predicate = (*p)->remap_after_duplication (possible_truths);
/* We do not want to free previous predicate; it is used by node origin. */
*p = NULL;
set_hint_predicate (p, new_predicate);
}
/* Hook that is called by cgraph.c when a node is duplicated. */
void
ipa_fn_summary_t::duplicate (cgraph_node *src,
cgraph_node *dst,
ipa_fn_summary *,
ipa_fn_summary *info)
{
new (info) ipa_fn_summary (*ipa_fn_summaries->get (src));
/* TODO: as an optimization, we may avoid copying conditions
that are known to be false or true. */
info->conds = vec_safe_copy (info->conds);
/* When there are any replacements in the function body, see if we can figure
out that something was optimized out. */
if (ipa_node_params_sum && dst->clone.tree_map)
{
vec<size_time_entry, va_gc> *entry = info->size_time_table;
/* Use SRC parm info since it may not be copied yet. */
class ipa_node_params *parms_info = IPA_NODE_REF (src);
vec<tree> known_vals = vNULL;
int count = ipa_get_param_count (parms_info);
int i, j;
clause_t possible_truths;
predicate true_pred = true;
size_time_entry *e;
int optimized_out_size = 0;
bool inlined_to_p = false;
struct cgraph_edge *edge, *next;
info->size_time_table = 0;
known_vals.safe_grow_cleared (count);
for (i = 0; i < count; i++)
{
struct ipa_replace_map *r;
for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
{
if (r->parm_num == i)
{
known_vals[i] = r->new_tree;
break;
}
}
}
evaluate_conditions_for_known_args (dst, false,
known_vals,
vNULL,
vNULL,
&possible_truths,
/* We are going to specialize,
so ignore nonspec truths. */
NULL);
known_vals.release ();
info->account_size_time (0, 0, true_pred, true_pred);
/* Remap size_time vectors.
Simplify the predicate by pruning out alternatives that are known
to be false.
TODO: as an optimization, we can also eliminate conditions known
to be true. */
for (i = 0; vec_safe_iterate (entry, i, &e); i++)
{
predicate new_exec_pred;
predicate new_nonconst_pred;
new_exec_pred = e->exec_predicate.remap_after_duplication
(possible_truths);
new_nonconst_pred = e->nonconst_predicate.remap_after_duplication
(possible_truths);
if (new_exec_pred == false || new_nonconst_pred == false)
optimized_out_size += e->size;
else
info->account_size_time (e->size, e->time, new_exec_pred,
new_nonconst_pred);
}
/* Remap edge predicates with the same simplification as above.
Also copy constantness arrays. */
for (edge = dst->callees; edge; edge = next)
{
predicate new_predicate;
class ipa_call_summary *es = ipa_call_summaries->get (edge);
next = edge->next_callee;
if (!edge->inline_failed)
inlined_to_p = true;
if (!es->predicate)
continue;
new_predicate = es->predicate->remap_after_duplication
(possible_truths);
if (new_predicate == false && *es->predicate != false)
optimized_out_size += es->call_stmt_size * ipa_fn_summary::size_scale;
edge_set_predicate (edge, &new_predicate);
}
/* Remap indirect edge predicates with the same simplification as above.
Also copy constantness arrays. */
for (edge = dst->indirect_calls; edge; edge = next)
{
predicate new_predicate;
class ipa_call_summary *es = ipa_call_summaries->get (edge);
next = edge->next_callee;
gcc_checking_assert (edge->inline_failed);
if (!es->predicate)
continue;
new_predicate = es->predicate->remap_after_duplication
(possible_truths);
if (new_predicate == false && *es->predicate != false)
optimized_out_size += es->call_stmt_size * ipa_fn_summary::size_scale;
edge_set_predicate (edge, &new_predicate);
}
remap_hint_predicate_after_duplication (&info->loop_iterations,
possible_truths);
remap_hint_predicate_after_duplication (&info->loop_stride,
possible_truths);
/* If the inliner or some pass after it ever starts producing
non-trivial clones, we will get in trouble with the lack of information
about updating self sizes, because the size vectors already contain
sizes of the callees. */
gcc_assert (!inlined_to_p || !optimized_out_size);
}
else
{
info->size_time_table = vec_safe_copy (info->size_time_table);
if (info->loop_iterations)
{
predicate p = *info->loop_iterations;
info->loop_iterations = NULL;
set_hint_predicate (&info->loop_iterations, p);
}
if (info->loop_stride)
{
predicate p = *info->loop_stride;
info->loop_stride = NULL;
set_hint_predicate (&info->loop_stride, p);
}
}
if (!dst->inlined_to)
ipa_update_overall_fn_summary (dst);
}
/* Hook that is called by cgraph.c when a node is duplicated. */
void
ipa_call_summary_t::duplicate (struct cgraph_edge *src,
struct cgraph_edge *dst,
class ipa_call_summary *srcinfo,
class ipa_call_summary *info)
{
new (info) ipa_call_summary (*srcinfo);
info->predicate = NULL;
edge_set_predicate (dst, srcinfo->predicate);
info->param = srcinfo->param.copy ();
if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
{
info->call_stmt_size -= (eni_size_weights.indirect_call_cost
- eni_size_weights.call_cost);
info->call_stmt_time -= (eni_time_weights.indirect_call_cost
- eni_time_weights.call_cost);
}
}
/* Dump edge summaries associated to NODE and recursively to all clones.
Indent by INDENT. */
static void
dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
class ipa_fn_summary *info)
{
struct cgraph_edge *edge;
for (edge = node->callees; edge; edge = edge->next_callee)
{
class ipa_call_summary *es = ipa_call_summaries->get (edge);
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
int i;
fprintf (f,
"%*s%s %s\n%*s freq:%4.2f",
indent, "", callee->dump_name (),
!edge->inline_failed
? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
indent, "", edge->sreal_frequency ().to_double ());
if (cross_module_call_p (edge))
fprintf (f, " cross module");
if (es)
fprintf (f, " loop depth:%2i size:%2i time: %2i",
es->loop_depth, es->call_stmt_size, es->call_stmt_time);
ipa_fn_summary *s = ipa_fn_summaries->get (callee);
ipa_size_summary *ss = ipa_size_summaries->get (callee);
if (s != NULL)
fprintf (f, " callee size:%2i stack:%2i",
(int) (ss->size / ipa_fn_summary::size_scale),
(int) s->estimated_stack_size);
if (es && es->predicate)
{
fprintf (f, " predicate: ");
es->predicate->dump (f, info->conds);
}
else
fprintf (f, "\n");
if (es && es->param.exists ())
for (i = 0; i < (int) es->param.length (); i++)
{
int prob = es->param[i].change_prob;
if (!prob)
fprintf (f, "%*s op%i is compile time invariant\n",
indent + 2, "", i);
else if (prob != REG_BR_PROB_BASE)
fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
prob * 100.0 / REG_BR_PROB_BASE);
}
if (!edge->inline_failed)
{
ipa_size_summary *ss = ipa_size_summaries->get (callee);
fprintf (f, "%*sStack frame offset %i, callee self size %i\n",
indent + 2, "",
(int) ipa_get_stack_frame_offset (callee),
(int) ss->estimated_self_stack_size);
dump_ipa_call_summary (f, indent + 2, callee, info);
}
}
for (edge = node->indirect_calls; edge; edge = edge->next_callee)
{
class ipa_call_summary *es = ipa_call_summaries->get (edge);
fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
" time: %2i",
indent, "",
es->loop_depth,
edge->sreal_frequency ().to_double (), es->call_stmt_size,
es->call_stmt_time);
if (es->predicate)
{
fprintf (f, "predicate: ");
es->predicate->dump (f, info->conds);
}
else
fprintf (f, "\n");
}
}
void
ipa_dump_fn_summary (FILE *f, struct cgraph_node *node)
{
if (node->definition)
{
class ipa_fn_summary *s = ipa_fn_summaries->get (node);
class ipa_size_summary *ss = ipa_size_summaries->get (node);
if (s != NULL)
{
size_time_entry *e;
int i;
fprintf (f, "IPA function summary for %s", node->dump_name ());
if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
fprintf (f, " always_inline");
if (s->inlinable)
fprintf (f, " inlinable");
if (s->fp_expressions)
fprintf (f, " fp_expression");
fprintf (f, "\n global time: %f\n", s->time.to_double ());
fprintf (f, " self size: %i\n", ss->self_size);
fprintf (f, " global size: %i\n", ss->size);
fprintf (f, " min size: %i\n", s->min_size);
fprintf (f, " self stack: %i\n",
(int) ss->estimated_self_stack_size);
fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
if (s->growth)
fprintf (f, " estimated growth:%i\n", (int) s->growth);
if (s->scc_no)
fprintf (f, " In SCC: %i\n", (int) s->scc_no);
for (i = 0; vec_safe_iterate (s->size_time_table, i, &e); i++)
{
fprintf (f, " size:%f, time:%f",
(double) e->size / ipa_fn_summary::size_scale,
e->time.to_double ());
if (e->exec_predicate != true)
{
fprintf (f, ", executed if:");
e->exec_predicate.dump (f, s->conds, 0);
}
if (e->exec_predicate != e->nonconst_predicate)
{
fprintf (f, ", nonconst if:");
e->nonconst_predicate.dump (f, s->conds, 0);
}
fprintf (f, "\n");
}
if (s->loop_iterations)
{
fprintf (f, " loop iterations:");
s->loop_iterations->dump (f, s->conds);
}
if (s->loop_stride)
{
fprintf (f, " loop stride:");
s->loop_stride->dump (f, s->conds);
}
fprintf (f, " calls:\n");
dump_ipa_call_summary (f, 4, node, s);
fprintf (f, "\n");
}
else
fprintf (f, "IPA summary for %s is missing.\n", node->dump_name ());
}
}
DEBUG_FUNCTION void
ipa_debug_fn_summary (struct cgraph_node *node)
{
ipa_dump_fn_summary (stderr, node);
}
void
ipa_dump_fn_summaries (FILE *f)
{
struct cgraph_node *node;
FOR_EACH_DEFINED_FUNCTION (node)
if (!node->inlined_to)
ipa_dump_fn_summary (f, node);
}
/* Callback of walk_aliased_vdefs. Flags that it has been invoked to the
boolean variable pointed to by DATA. */
static bool
mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
void *data)
{
bool *b = (bool *) data;
*b = true;
return true;
}
/* If OP refers to value of function parameter, return the corresponding
parameter. If non-NULL, the size of the memory load (or the SSA_NAME of the
PARM_DECL) will be stored to *SIZE_P in that case too. */
static tree
unmodified_parm_1 (ipa_func_body_info *fbi, gimple *stmt, tree op,
poly_int64 *size_p)
{
/* SSA_NAME referring to parm default def? */
if (TREE_CODE (op) == SSA_NAME
&& SSA_NAME_IS_DEFAULT_DEF (op)
&& TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
{
if (size_p)
*size_p = tree_to_poly_int64 (TYPE_SIZE (TREE_TYPE (op)));
return SSA_NAME_VAR (op);
}
/* Non-SSA parm reference? */
if (TREE_CODE (op) == PARM_DECL)
{
bool modified = false;
ao_ref refd;
ao_ref_init (&refd, op);
int walked = walk_aliased_vdefs (&refd, gimple_vuse (stmt),
mark_modified, &modified, NULL, NULL,
fbi->aa_walk_budget + 1);
if (walked < 0)
{
fbi->aa_walk_budget = 0;
return NULL_TREE;
}
if (!modified)
{
if (size_p)
*size_p = tree_to_poly_int64 (TYPE_SIZE (TREE_TYPE (op)));
return op;
}
}
return NULL_TREE;
}
/* If OP refers to value of function parameter, return the corresponding
parameter. Also traverse chains of SSA register assignments. If non-NULL,
the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
stored to *SIZE_P in that case too. */
static tree
unmodified_parm (ipa_func_body_info *fbi, gimple *stmt, tree op,
poly_int64 *size_p)
{
tree res = unmodified_parm_1 (fbi, stmt, op, size_p);
if (res)
return res;
if (TREE_CODE (op) == SSA_NAME
&& !SSA_NAME_IS_DEFAULT_DEF (op)
&& gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
return unmodified_parm (fbi, SSA_NAME_DEF_STMT (op),
gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
size_p);
return NULL_TREE;
}
/* If OP refers to a value of a function parameter or value loaded from an
aggregate passed to a parameter (either by value or reference), return TRUE
and store the number of the parameter to *INDEX_P, the access size into
*SIZE_P, and information whether and how it has been loaded from an
aggregate into *AGGPOS. INFO describes the function parameters, STMT is the
statement in which OP is used or loaded. */
static bool
unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
gimple *stmt, tree op, int *index_p,
poly_int64 *size_p,
struct agg_position_info *aggpos)
{
tree res = unmodified_parm_1 (fbi, stmt, op, size_p);
gcc_checking_assert (aggpos);
if (res)
{
*index_p = ipa_get_param_decl_index (fbi->info, res);
if (*index_p < 0)
return false;
aggpos->agg_contents = false;
aggpos->by_ref = false;
return true;
}
if (TREE_CODE (op) == SSA_NAME)
{
if (SSA_NAME_IS_DEFAULT_DEF (op)
|| !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
return false;
stmt = SSA_NAME_DEF_STMT (op);
op = gimple_assign_rhs1 (stmt);
if (!REFERENCE_CLASS_P (op))
return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p, size_p,
aggpos);
}
aggpos->agg_contents = true;
return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
stmt, op, index_p, &aggpos->offset,
size_p, &aggpos->by_ref);
}
/* See if statement might disappear after inlining.
0 - means not eliminated
1 - half of the statements go away
2 - for sure it is eliminated.
We are not terribly sophisticated, basically looking for simple abstraction
penalty wrappers. */
static int
eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
{
enum gimple_code code = gimple_code (stmt);
enum tree_code rhs_code;
if (!optimize)
return 0;
switch (code)
{
case GIMPLE_RETURN:
return 2;
case GIMPLE_ASSIGN:
if (gimple_num_ops (stmt) != 2)
return 0;
rhs_code = gimple_assign_rhs_code (stmt);
/* Casts of parameters, loads from parameters passed by reference
and stores to return value or parameters are often free after
inlining due to SRA and further combining.
Assume that half of the statements go away. */
if (CONVERT_EXPR_CODE_P (rhs_code)
|| rhs_code == VIEW_CONVERT_EXPR
|| rhs_code == ADDR_EXPR
|| gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
{
tree rhs = gimple_assign_rhs1 (stmt);
tree lhs = gimple_assign_lhs (stmt);
tree inner_rhs = get_base_address (rhs);
tree inner_lhs = get_base_address (lhs);
bool rhs_free = false;
bool lhs_free = false;
if (!inner_rhs)
inner_rhs = rhs;
if (!inner_lhs)
inner_lhs = lhs;
/* Reads of parameter are expected to be free. */
if (unmodified_parm (fbi, stmt, inner_rhs, NULL))
rhs_free = true;
/* Match expressions of form &this->field. Those will most likely
combine with something upstream after inlining. */
else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
{
tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
if (TREE_CODE (op) == PARM_DECL)
rhs_free = true;
else if (TREE_CODE (op) == MEM_REF
&& unmodified_parm (fbi, stmt, TREE_OPERAND (op, 0),
NULL))
rhs_free = true;
}
/* When parameter is not SSA register because its address is taken
and it is just copied into one, the statement will be completely
free after inlining (we will copy propagate backward). */
if (rhs_free && is_gimple_reg (lhs))
return 2;
/* Reads of parameters passed by reference
are expected to be free (i.e. optimized out after inlining). */
if (TREE_CODE (inner_rhs) == MEM_REF
&& unmodified_parm (fbi, stmt, TREE_OPERAND (inner_rhs, 0), NULL))
rhs_free = true;
/* Copying parameter passed by reference into gimple register is
probably also going to copy propagate, but we can't be quite
sure. */
if (rhs_free && is_gimple_reg (lhs))
lhs_free = true;
/* Writes to parameters, parameters passed by value and return value
(either directly or passed via invisible reference) are free.
TODO: We ought to handle testcase like
struct a {int a,b;};
struct a
returnstruct (void)
{
struct a a ={1,2};
return a;
}
This translates into:
returnstruct ()
{
int a$b;
int a$a;
struct a a;
struct a D.2739;
<bb 2>:
D.2739.a = 1;
D.2739.b = 2;
return D.2739;
}
For that we either need to copy ipa-split logic detecting writes
to return value. */
if (TREE_CODE (inner_lhs) == PARM_DECL
|| TREE_CODE (inner_lhs) == RESULT_DECL
|| (TREE_CODE (inner_lhs) == MEM_REF
&& (unmodified_parm (fbi, stmt, TREE_OPERAND (inner_lhs, 0),
NULL)
|| (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
&& SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
&& TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
(inner_lhs,
0))) == RESULT_DECL))))
lhs_free = true;
if (lhs_free
&& (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
rhs_free = true;
if (lhs_free && rhs_free)
return 1;
}
return 0;
default:
return 0;
}
}
/* Analyze EXPR if it represents a series of simple operations performed on
a function parameter and return true if so. FBI, STMT, EXPR, INDEX_P and
AGGPOS have the same meaning as in unmodified_parm_or_parm_agg_item.
Type of the parameter or load from an aggregate via the parameter is
stored in *TYPE_P. Operations on the parameter are recorded to
PARAM_OPS_P if it is not NULL. */
static bool
decompose_param_expr (struct ipa_func_body_info *fbi,
gimple *stmt, tree expr,
int *index_p, tree *type_p,
struct agg_position_info *aggpos,
expr_eval_ops *param_ops_p = NULL)
{
int op_limit = opt_for_fn (fbi->node->decl, param_ipa_max_param_expr_ops);
int op_count = 0;
if (param_ops_p)
*param_ops_p = NULL;
while (true)
{
expr_eval_op eval_op;
unsigned rhs_count;
unsigned cst_count = 0;
if (unmodified_parm_or_parm_agg_item (fbi, stmt, expr, index_p, NULL,
aggpos))
{
tree type = TREE_TYPE (expr);
if (aggpos->agg_contents)
{
/* Stop if containing bit-field. */
if (TREE_CODE (expr) == BIT_FIELD_REF
|| contains_bitfld_component_ref_p (expr))
break;
}
*type_p = type;
return true;
}
if (TREE_CODE (expr) != SSA_NAME || SSA_NAME_IS_DEFAULT_DEF (expr))
break;
if (!is_gimple_assign (stmt = SSA_NAME_DEF_STMT (expr)))
break;
switch (gimple_assign_rhs_class (stmt))
{
case GIMPLE_SINGLE_RHS:
expr = gimple_assign_rhs1 (stmt);
continue;
case GIMPLE_UNARY_RHS:
rhs_count = 1;
break;
case GIMPLE_BINARY_RHS:
rhs_count = 2;
break;
case GIMPLE_TERNARY_RHS:
rhs_count = 3;
break;
default:
goto fail;
}
/* Stop if expression is too complex. */
if (op_count++ == op_limit)
break;
if (param_ops_p)
{
eval_op.code = gimple_assign_rhs_code (stmt);
eval_op.type = TREE_TYPE (gimple_assign_lhs (stmt));
eval_op.val[0] = NULL_TREE;
eval_op.val[1] = NULL_TREE;
}
expr = NULL_TREE;
for (unsigned i = 0; i < rhs_count; i++)
{
tree op = gimple_op (stmt, i + 1);
gcc_assert (op && !TYPE_P (op));
if (is_gimple_ip_invariant (op))
{
if (++cst_count == rhs_count)
goto fail;
eval_op.val[cst_count - 1] = op;
}
else if (!expr)
{
/* Found a non-constant operand, and record its index in rhs
operands. */
eval_op.index = i;
expr = op;
}
else
{
/* Found more than one non-constant operands. */
goto fail;
}
}
if (param_ops_p)
vec_safe_insert (*param_ops_p, 0, eval_op);
}
/* Failed to decompose, free resource and return. */
fail:
if (param_ops_p)
vec_free (*param_ops_p);
return false;
}
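/* Editorial worked example: for the GIMPLE sequence
     tmp_1 = param_2 + 4;
     cst_3 = (long) tmp_1;
     if (cst_3 > 10) ...
   calling decompose_param_expr on cst_3 walks back through the two
   assignments, records the addition and then the conversion in *PARAM_OPS_P
   (the operation closest to the parameter first, since each op is inserted at
   index 0), and returns the index of param_2 in *INDEX_P, so that
   evaluate_conditions_for_known_args can replay the operations on a known
   constant value of the parameter.  */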
/* If BB ends with a conditional that we can turn into predicates, attach the
corresponding predicates to the CFG edges. */
static void
set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary,
basic_block bb)
{
gimple *last;
tree op, op2;
int index;
struct agg_position_info aggpos;
enum tree_code code, inverted_code;
edge e;
edge_iterator ei;
gimple *set_stmt;
tree param_type;
expr_eval_ops param_ops;
last = last_stmt (bb);
if (!last || gimple_code (last) != GIMPLE_COND)
return;
if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
return;
op = gimple_cond_lhs (last);
if (decompose_param_expr (fbi, last, op, &index, &param_type, &aggpos,
&param_ops))
{
code = gimple_cond_code (last);
inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
FOR_EACH_EDGE (e, ei, bb->succs)
{
enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
? code : inverted_code);
/* invert_tree_comparison will return ERROR_MARK on FP
comparisons that are not EQ/NE instead of returning the proper
unordered code. Be sure it is not confused with NON_CONSTANT.
And if the edge's target is the final block of the diamond CFG
of this conditional statement, we do not need to compute a
predicate for the edge because the final block's predicate must
be at least as strong as that of the first block of the statement. */
if (this_code != ERROR_MARK
&& !dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
{
predicate p
= add_condition (summary, params_summary, index,
param_type, &aggpos,
this_code, gimple_cond_rhs (last), param_ops);
e->aux = edge_predicate_pool.allocate ();
*(predicate *) e->aux = p;
}
}
vec_free (param_ops);
}
if (TREE_CODE (op) != SSA_NAME)
return;
/* Special case
if (builtin_constant_p (op))
constant_code
else
nonconstant_code.
Here we can predicate nonconstant_code. We can't
really handle constant_code since we have no predicate
for this and also the constant code is not known to be
optimized away when inliner doesn't see operand is constant.
Other optimizers might think otherwise. */
if (gimple_cond_code (last) != NE_EXPR
|| !integer_zerop (gimple_cond_rhs (last)))
return;
set_stmt = SSA_NAME_DEF_STMT (op);
if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
|| gimple_call_num_args (set_stmt) != 1)
return;
op2 = gimple_call_arg (set_stmt, 0);
if (!decompose_param_expr (fbi, set_stmt, op2, &index, &param_type, &aggpos))
return;
FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
{
predicate p = add_condition (summary, params_summary, index,
param_type, &aggpos,
predicate::is_not_constant, NULL_TREE);
e->aux = edge_predicate_pool.allocate ();
*(predicate *) e->aux = p;
}
}
/* If BB ends with a switch that we can turn into predicates, attach the
corresponding predicates to the CFG edges. */
static void
set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary,
basic_block bb)
{
gimple *lastg;
tree op;
int index;
struct agg_position_info aggpos;
edge e;
edge_iterator ei;
size_t n;
size_t case_idx;
tree param_type;
expr_eval_ops param_ops;
lastg = last_stmt (bb);
if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
return;
gswitch *last = as_a <gswitch *> (lastg);
op = gimple_switch_index (last);
if (!decompose_param_expr (fbi, last, op, &index, &param_type, &aggpos,
&param_ops))
return;
auto_vec<std::pair<tree, tree> > ranges;
tree type = TREE_TYPE (op);
int bound_limit = opt_for_fn (fbi->node->decl,
param_ipa_max_switch_predicate_bounds);
int bound_count = 0;
wide_int vr_wmin, vr_wmax;
value_range_kind vr_type = get_range_info (op, &vr_wmin, &vr_wmax);
FOR_EACH_EDGE (e, ei, bb->succs)
{
e->aux = edge_predicate_pool.allocate ();
*(predicate *) e->aux = false;
}
e = gimple_switch_edge (cfun, last, 0);
/* Set BOUND_COUNT to maximum count to bypass computing predicate for
default case if its target basic block is the convergence point of all
switch cases, which can be determined by checking whether it
post-dominates the switch statement. */
if (dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
bound_count = INT_MAX;
n = gimple_switch_num_labels (last);
for (case_idx = 1; case_idx < n; ++case_idx)
{
tree cl = gimple_switch_label (last, case_idx);
tree min = CASE_LOW (cl);
tree max = CASE_HIGH (cl);
predicate p;
e = gimple_switch_edge (cfun, last, case_idx);
/* The case value might not have same type as switch expression,
extend the value based on the expression type. */
if (TREE_TYPE (min) != type)
min = wide_int_to_tree (type, wi::to_wide (min));
if (!max)
max = min;
else if (TREE_TYPE (max) != type)
max = wide_int_to_tree (type, wi::to_wide (max));
/* If the case's target basic block is the convergence point of all switch
cases, its predicate should be at least as strong as that of the switch
statement. */
if (dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
p = true;
else if (min == max)
p = add_condition (summary, params_summary, index, param_type,
&aggpos, EQ_EXPR, min, param_ops);
else
{
predicate p1, p2;
p1 = add_condition (summary, params_summary, index, param_type,
&aggpos, GE_EXPR, min, param_ops);
p2 = add_condition (summary, params_summary,index, param_type,
&aggpos, LE_EXPR, max, param_ops);
p = p1 & p2;
}
*(class predicate *) e->aux
= p.or_with (summary->conds, *(class predicate *) e->aux);
/* If there are too many disjoint case ranges, predicate for default
case might become too complicated. So add a limit here. */
if (bound_count > bound_limit)
continue;
bool new_range = true;
if (!ranges.is_empty ())
{
wide_int curr_wmin = wi::to_wide (min);
wide_int last_wmax = wi::to_wide (ranges.last ().second);
/* Merge case ranges if they are continuous. */
if (curr_wmin == last_wmax + 1)
new_range = false;
else if (vr_type == VR_ANTI_RANGE)
{
/* If two disjoint case ranges can be connected by anti-range
of switch index, combine them to one range. */
if (wi::lt_p (vr_wmax, curr_wmin - 1, TYPE_SIGN (type)))
vr_type = VR_UNDEFINED;
else if (wi::le_p (vr_wmin, last_wmax + 1, TYPE_SIGN (type)))
new_range = false;
}
}
/* Create or extend a case range.  We also count the endpoints of the range
set; this number is roughly the number of conditions we will create for
the default case's predicate. */
if (new_range)
{
bound_count += (min == max) ? 1 : 2;
ranges.safe_push (std::make_pair (min, max));
}
else
{
bound_count += (ranges.last ().first == ranges.last ().second);
ranges.last ().second = max;
}
}
e = gimple_switch_edge (cfun, last, 0);
if (bound_count > bound_limit)
{
*(class predicate *) e->aux = true;
vec_free (param_ops);
return;
}
predicate p_seg = true;
predicate p_all = false;
if (vr_type != VR_RANGE)
{
vr_wmin = wi::to_wide (TYPE_MIN_VALUE (type));
vr_wmax = wi::to_wide (TYPE_MAX_VALUE (type));
}
/* Construct predicate to represent default range set that is negation of
all case ranges. Case range is classified as containing single/non-single
values. Suppose a piece of case ranges in the following.
[D1...D2] [S1] ... [Sn] [D3...D4]
To represent default case's range sets between two non-single value
case ranges (From D2 to D3), we construct predicate as:
D2 < x < D3 && x != S1 && ... && x != Sn
*/
for (size_t i = 0; i < ranges.length (); i++)
{
tree min = ranges[i].first;
tree max = ranges[i].second;
if (min == max)
p_seg &= add_condition (summary, params_summary, index,
param_type, &aggpos, NE_EXPR,
min, param_ops);
else
{
/* Do not create sub-predicate for range that is beyond low bound
of switch index. */
if (wi::lt_p (vr_wmin, wi::to_wide (min), TYPE_SIGN (type)))
{
p_seg &= add_condition (summary, params_summary, index,
param_type, &aggpos,
LT_EXPR, min, param_ops);
p_all = p_all.or_with (summary->conds, p_seg);
}
/* Do not create sub-predicate for range that is beyond up bound
of switch index. */
if (wi::le_p (vr_wmax, wi::to_wide (max), TYPE_SIGN (type)))
{
p_seg = false;
break;
}
p_seg = add_condition (summary, params_summary, index,
param_type, &aggpos, GT_EXPR,
max, param_ops);
}
}
p_all = p_all.or_with (summary->conds, p_seg);
*(class predicate *) e->aux
= p_all.or_with (summary->conds, *(class predicate *) e->aux);
vec_free (param_ops);
}
/* For each BB in NODE attach to its AUX pointer predicate under
which it is executable. */
static void
compute_bb_predicates (struct ipa_func_body_info *fbi,
struct cgraph_node *node,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary)
{
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
bool done = false;
basic_block bb;
FOR_EACH_BB_FN (bb, my_function)
{
set_cond_stmt_execution_predicate (fbi, summary, params_summary, bb);
set_switch_stmt_execution_predicate (fbi, summary, params_summary, bb);
}
/* Entry block is always executable. */
ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
= edge_predicate_pool.allocate ();
*(predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux = true;
/* A simple dataflow propagation of predicates forward in the CFG.
TODO: work in reverse postorder. */
while (!done)
{
done = true;
FOR_EACH_BB_FN (bb, my_function)
{
predicate p = false;
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->src->aux)
{
predicate this_bb_predicate
= *(predicate *) e->src->aux;
if (e->aux)
this_bb_predicate &= (*(class predicate *) e->aux);
p = p.or_with (summary->conds, this_bb_predicate);
if (p == true)
break;
}
}
if (p != false)
{
basic_block pdom_bb;
if (!bb->aux)
{
done = false;
bb->aux = edge_predicate_pool.allocate ();
*((predicate *) bb->aux) = p;
}
else if (p != *(predicate *) bb->aux)
{
/* This OR operation is needed to ensure monotonic data flow
in the case we hit the limit on number of clauses and the
and/or operations above give approximate answers. */
p = p.or_with (summary->conds, *(predicate *)bb->aux);
if (p != *(predicate *) bb->aux)
{
done = false;
*((predicate *) bb->aux) = p;
}
}
/* For switch/if statement, we can OR-combine predicates of all
its cases/branches to get predicate for basic block in their
convergence point, but sometimes this will generate very
complicated predicate. Actually, we can get simplified
predicate in another way by using the fact that predicate
for a basic block must also hold true for its post dominators.
To be specific, basic block in convergence point of
conditional statement should include predicate of the
statement. */
pdom_bb = get_immediate_dominator (CDI_POST_DOMINATORS, bb);
if (pdom_bb == EXIT_BLOCK_PTR_FOR_FN (my_function) || !pdom_bb)
;
else if (!pdom_bb->aux)
{
done = false;
pdom_bb->aux = edge_predicate_pool.allocate ();
*((predicate *) pdom_bb->aux) = p;
}
else if (p != *(predicate *) pdom_bb->aux)
{
p = p.or_with (summary->conds, *(predicate *)pdom_bb->aux);
if (p != *(predicate *) pdom_bb->aux)
{
done = false;
*((predicate *) pdom_bb->aux) = p;
}
}
}
}
}
}
/* Return a predicate specifying when STMT might have a result that is not
a compile-time constant. */
static predicate
will_be_nonconstant_expr_predicate (ipa_func_body_info *fbi,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary,
tree expr,
vec<predicate> nonconstant_names)
{
tree parm;
int index;
while (UNARY_CLASS_P (expr))
expr = TREE_OPERAND (expr, 0);
parm = unmodified_parm (fbi, NULL, expr, NULL);
if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
return add_condition (summary, params_summary, index, TREE_TYPE (parm), NULL,
predicate::changed, NULL_TREE);
if (is_gimple_min_invariant (expr))
return false;
if (TREE_CODE (expr) == SSA_NAME)
return nonconstant_names[SSA_NAME_VERSION (expr)];
if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
{
predicate p1
= will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 0),
nonconstant_names);
if (p1 == true)
return p1;
predicate p2
= will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 1),
nonconstant_names);
return p1.or_with (summary->conds, p2);
}
else if (TREE_CODE (expr) == COND_EXPR)
{
predicate p1
= will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 0),
nonconstant_names);
if (p1 == true)
return p1;
predicate p2
= will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 1),
nonconstant_names);
if (p2 == true)
return p2;
p1 = p1.or_with (summary->conds, p2);
p2 = will_be_nonconstant_expr_predicate (fbi, summary,
params_summary,
TREE_OPERAND (expr, 2),
nonconstant_names);
return p2.or_with (summary->conds, p1);
}
else if (TREE_CODE (expr) == CALL_EXPR)
return true;
else
{
debug_tree (expr);
gcc_unreachable ();
}
return false;
}
/* Return a predicate specifying when STMT might have a result that is
not a compile-time constant. */
static predicate
will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
class ipa_fn_summary *summary,
class ipa_node_params *params_summary,
gimple *stmt,
vec<predicate> nonconstant_names)
{
predicate p = true;
ssa_op_iter iter;
tree use;
tree param_type = NULL_TREE;
predicate op_non_const;
bool is_load;
int base_index;
struct agg_position_info aggpos;
/* What statements might be optimized away
when their arguments are constant. */
if (gimple_code (stmt) != GIMPLE_ASSIGN
&& gimple_code (stmt) != GIMPLE_COND
&& gimple_code (stmt) != GIMPLE_SWITCH
&& (gimple_code (stmt) != GIMPLE_CALL
|| !(gimple_call_flags (stmt) & ECF_CONST)))
return p;
/* Stores will stay anyway. */
if (gimple_store_p (stmt))
return p;
is_load = gimple_assign_load_p (stmt);
/* Loads can be optimized when the value is known. */
if (is_load)
{
tree op = gimple_assign_rhs1 (stmt);
if (!decompose_param_expr (fbi, stmt, op, &base_index, &param_type,
&aggpos))
return p;
}
else
base_index = -1;
/* See if we understand all operands before we start
adding conditionals. */
FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
{
tree parm = unmodified_parm (fbi, stmt, use, NULL);
/* For arguments we can build a condition. */
if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
continue;
if (TREE_CODE (use) != SSA_NAME)
return p;
/* If we know the predicate under which the operand is constant,
we can still say something useful. */
if (nonconstant_names[SSA_NAME_VERSION (use)] != true)
continue;
return p;
}
if (is_load)
op_non_const =
add_condition (summary, params_summary,
base_index, param_type, &aggpos,
predicate::changed, NULL_TREE);
else
op_non_const = false;
FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
{
tree parm = unmodified_parm (fbi, stmt, use, NULL);
int index;
if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
{
if (index != base_index)
p = add_condition (summary, params_summary, index,
TREE_TYPE (parm), NULL,
predicate::changed, NULL_TREE);
else
continue;
}
else
p = nonconstant_names[SSA_NAME_VERSION (use)];
op_non_const = p.or_with (summary->conds, op_non_const);
}
if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
&& gimple_op (stmt, 0)
&& TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
= op_non_const;
return op_non_const;
}
struct record_modified_bb_info
{
tree op;
bitmap bb_set;
gimple *stmt;
};
/* A value is initialized in INIT_BB and used in USE_BB. We want to
estimate the probability that it changes between uses in USE_BB.
INIT_BB->count / USE_BB->count is an estimate, but if INIT_BB is in a
different loop nest we can do better.
This is all just an estimate. In theory we would look for a minimal cut
separating INIT_BB and USE_BB, but we only want to anticipate loop
invariant motion anyway. */
static basic_block
get_minimal_bb (basic_block init_bb, basic_block use_bb)
{
class loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
if (l && l->header->count < init_bb->count)
return l->header;
return init_bb;
}
/* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
set except for info->stmt. */
static bool
record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
{
struct record_modified_bb_info *info =
(struct record_modified_bb_info *) data;
if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
return false;
if (gimple_clobber_p (SSA_NAME_DEF_STMT (vdef)))
return false;
bitmap_set_bit (info->bb_set,
SSA_NAME_IS_DEFAULT_DEF (vdef)
? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
: get_minimal_bb
(gimple_bb (SSA_NAME_DEF_STMT (vdef)),
gimple_bb (info->stmt))->index);
if (dump_file)
{
fprintf (dump_file, " Param ");
print_generic_expr (dump_file, info->op, TDF_SLIM);
fprintf (dump_file, " changed at bb %i, minimal: %i stmt: ",
gimple_bb (SSA_NAME_DEF_STMT (vdef))->index,
get_minimal_bb
(gimple_bb (SSA_NAME_DEF_STMT (vdef)),
gimple_bb (info->stmt))->index);
print_gimple_stmt (dump_file, SSA_NAME_DEF_STMT (vdef), 0);
}
return false;
}
/* Return the probability (based on REG_BR_PROB_BASE) that the I-th
parameter of STMT will have changed since the last invocation of STMT.
Value 0 is reserved for compile-time invariants.
For ordinary parameters it is REG_BR_PROB_BASE. For loop invariants it
ought to be REG_BR_PROB_BASE / estimated_iters. */
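/* For example (informally), if the statement defining the argument runs
   once per 100 executions of the call, the value returned below is about
   REG_BR_PROB_BASE / 100, clamped to at least 1.  */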
static int
param_change_prob (ipa_func_body_info *fbi, gimple *stmt, int i)
{
tree op = gimple_call_arg (stmt, i);
basic_block bb = gimple_bb (stmt);
if (TREE_CODE (op) == WITH_SIZE_EXPR)
op = TREE_OPERAND (op, 0);
tree base = get_base_address (op);
/* Global invariants never change. */
if (is_gimple_min_invariant (base))
return 0;
/* We would have to do non-trivial analysis to really work out the
probability of the value changing (i.e. when the init statement is in
a sibling loop of the call).
We make a conservative estimate: when the call is executed N times more
often than the statement defining the value, we take the frequency 1/N. */
if (TREE_CODE (base) == SSA_NAME)
{
profile_count init_count;
if (!bb->count.nonzero_p ())
return REG_BR_PROB_BASE;
if (SSA_NAME_IS_DEFAULT_DEF (base))
init_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
else
init_count = get_minimal_bb
(gimple_bb (SSA_NAME_DEF_STMT (base)),
gimple_bb (stmt))->count;
if (init_count < bb->count)
return MAX ((init_count.to_sreal_scale (bb->count)
* REG_BR_PROB_BASE).to_int (), 1);
return REG_BR_PROB_BASE;
}
else
{
ao_ref refd;
profile_count max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
struct record_modified_bb_info info;
tree init = ctor_for_folding (base);
if (init != error_mark_node)
return 0;
if (!bb->count.nonzero_p ())
return REG_BR_PROB_BASE;
if (dump_file)
{
fprintf (dump_file, " Analyzing param change probability of ");
print_generic_expr (dump_file, op, TDF_SLIM);
fprintf (dump_file, "\n");
}
ao_ref_init (&refd, op);
info.op = op;
info.stmt = stmt;
info.bb_set = BITMAP_ALLOC (NULL);
int walked
= walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
NULL, NULL, fbi->aa_walk_budget);
if (walked < 0 || bitmap_bit_p (info.bb_set, bb->index))
{
if (dump_file)
{
if (walked < 0)
fprintf (dump_file, " Ran out of AA walking budget.\n");
else
fprintf (dump_file, " Set in same BB as used.\n");
}
BITMAP_FREE (info.bb_set);
return REG_BR_PROB_BASE;
}
bitmap_iterator bi;
unsigned index;
/* Look up the most frequent update of the value and assume that it
dominates all the others; precise analysis here is difficult. */
EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
max = max.max (BASIC_BLOCK_FOR_FN (cfun, index)->count);
if (dump_file)
{
fprintf (dump_file, " Set with count ");
max.dump (dump_file);
fprintf (dump_file, " and used with count ");
bb->count.dump (dump_file);
fprintf (dump_file, " freq %f\n",
max.to_sreal_scale (bb->count).to_double ());
}
BITMAP_FREE (info.bb_set);
if (max < bb->count)
return MAX ((max.to_sreal_scale (bb->count)
* REG_BR_PROB_BASE).to_int (), 1);
return REG_BR_PROB_BASE;
}
}
/* Find whether a basic block BB is the final block of a (half) diamond CFG
sub-graph and whether the predicate the condition depends on is known. If
so, return true and store the predicate in *P. */
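/* Roughly, the shape we look for is (in the half-diamond variant one of
   the arms is empty):

	first_bb   (ends with a GIMPLE_COND)
	 /     \
      arm1    arm2
	 \     /
	   BB      (the PHI node lives here)  */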
static bool
phi_result_unknown_predicate (ipa_func_body_info *fbi,
ipa_fn_summary *summary,
class ipa_node_params *params_summary,
basic_block bb,
predicate *p,
vec<predicate> nonconstant_names)
{
edge e;
edge_iterator ei;
basic_block first_bb = NULL;
gimple *stmt;
if (single_pred_p (bb))
{
*p = false;
return true;
}
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (single_succ_p (e->src))
{
if (!single_pred_p (e->src))
return false;
if (!first_bb)
first_bb = single_pred (e->src);
else if (single_pred (e->src) != first_bb)
return false;
}
else
{
if (!first_bb)
first_bb = e->src;
else if (e->src != first_bb)
return false;
}
}
if (!first_bb)
return false;
stmt = last_stmt (first_bb);
if (!stmt
|| gimple_code (stmt) != GIMPLE_COND
|| !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
return false;
*p = will_be_nonconstant_expr_predicate (fbi, summary, params_summary,
gimple_cond_lhs (stmt),
nonconstant_names);
if (*p == true)
return false;
else
return true;
}
/* Given a PHI statement in a function described by inline properties SUMMARY
and *P being the predicate describing whether the selected PHI argument is
known, store a predicate for the result of the PHI statement into
NONCONSTANT_NAMES, if possible. */
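/* On entry *P is the predicate computed by phi_result_unknown_predicate,
   i.e. roughly the condition under which the controlling GIMPLE_COND (and
   hence the choice of PHI argument) is not a known constant; below it is
   OR-ed with the nonconstant predicates of all non-invariant arguments.  */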
static void
predicate_for_phi_result (class ipa_fn_summary *summary, gphi *phi,
predicate *p,
vec<predicate> nonconstant_names)
{
unsigned i;
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
tree arg = gimple_phi_arg (phi, i)->def;
if (!is_gimple_min_invariant (arg))
{
gcc_assert (TREE_CODE (arg) == SSA_NAME);
*p = p->or_with (summary->conds,
nonconstant_names[SSA_NAME_VERSION (arg)]);
if (*p == true)
return;
}
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\t\tphi predicate: ");
p->dump (dump_file, summary->conds);
}
nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
}
/* For a typical usage of __builtin_expect (a <= b, 1), an extra
relational stmt may be introduced:
With the builtin, we have
t1 = a <= b;
t2 = (long int) t1;
t3 = __builtin_expect (t2, 1);
if (t3 != 0)
goto ...
Without the builtin, we have
if (a <= b)
goto ...
This affects the size/time estimation and may have an impact on earlier
inlining decisions. Here we find this pattern so it can be fixed up later. */
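/* The GIMPLE_COND returned below is the single user of the
   __builtin_expect result; analyze_function_body later discounts its size
   and time by one each, since the comparison is expected to be folded away
   together with the builtin.  */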
static gimple *
find_foldable_builtin_expect (basic_block bb)
{
gimple_stmt_iterator bsi;
for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
{
gimple *stmt = gsi_stmt (bsi);
if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
|| gimple_call_builtin_p (stmt, BUILT_IN_EXPECT_WITH_PROBABILITY)
|| gimple_call_internal_p (stmt, IFN_BUILTIN_EXPECT))
{
tree var = gimple_call_lhs (stmt);
tree arg = gimple_call_arg (stmt, 0);
use_operand_p use_p;
gimple *use_stmt;
bool match = false;
bool done = false;
if (!var || !arg)
continue;
gcc_assert (TREE_CODE (var) == SSA_NAME);
while (TREE_CODE (arg) == SSA_NAME)
{
gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
if (!is_gimple_assign (stmt_tmp))
break;
switch (gimple_assign_rhs_code (stmt_tmp))
{
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
case EQ_EXPR:
case NE_EXPR:
match = true;
done = true;
break;
CASE_CONVERT:
break;
default:
done = true;
break;
}
if (done)
break;
arg = gimple_assign_rhs1 (stmt_tmp);
}
if (match && single_imm_use (var, &use_p, &use_stmt)
&& gimple_code (use_stmt) == GIMPLE_COND)
return use_stmt;
}
}
return NULL;
}
/* Return true when the basic block contains only clobbers followed by RESX.
Such BBs are kept around to make removal of dead stores possible in the
presence of EH and will be optimized out by optimize_clobbers later in the
game.
NEED_EH is used to recurse in case the clobber has non-EH predecessors
that can be clobber-only, too. When it is false, the RESX is not necessary
at the end of the basic block. */
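/* A typical example is a landing-pad block containing only

     var ={v} {CLOBBER};
     resx N;

   possibly preceded by other clobber-only predecessors.  */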
static bool
clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
edge_iterator ei;
edge e;
if (need_eh)
{
if (gsi_end_p (gsi))
return false;
if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
return false;
gsi_prev (&gsi);
}
else if (!single_succ_p (bb))
return false;
for (; !gsi_end_p (gsi); gsi_prev (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
if (gimple_clobber_p (stmt))
continue;
if (gimple_code (stmt) == GIMPLE_LABEL)
break;
return false;
}
/* See if all predecessors are either throws or clobber only BBs. */
FOR_EACH_EDGE (e, ei, bb->preds)
if (!(e->flags & EDGE_EH)
&& !clobber_only_eh_bb_p (e->src, false))
return false;
return true;
}
/* Return true if STMT computes a floating point expression that may be
affected by -ffast-math and similar flags. */
static bool
fp_expression_p (gimple *stmt)
{
ssa_op_iter i;
tree op;
FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
if (FLOAT_TYPE_P (TREE_TYPE (op)))
return true;
return false;
}
/* Analyze function body for NODE.
EARLY indicates run from early optimization pipeline. */
static void
analyze_function_body (struct cgraph_node *node, bool early)
{
sreal time = opt_for_fn (node->decl, param_uninlined_function_time);
/* Estimate static overhead for function prologue/epilogue and alignment. */
int size = opt_for_fn (node->decl, param_uninlined_function_insns);
/* Benefits are scaled by probability of elimination that is in range
<0,2>. */
basic_block bb;
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
sreal freq;
class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
class ipa_node_params *params_summary = early ? NULL : IPA_NODE_REF (node);
predicate bb_predicate;
struct ipa_func_body_info fbi;
vec<predicate> nonconstant_names = vNULL;
int nblocks, n;
int *order;
gimple *fix_builtin_expect_stmt;
gcc_assert (my_function && my_function->cfg);
gcc_assert (cfun == my_function);
memset(&fbi, 0, sizeof(fbi));
vec_free (info->conds);
info->conds = NULL;
vec_free (info->size_time_table);
info->size_time_table = NULL;
/* When optimizing and analyzing for IPA inliner, initialize loop optimizer
so we can produce proper inline hints.
When optimizing and analyzing for early inliner, initialize node params
so we can produce correct BB predicates. */
if (opt_for_fn (node->decl, optimize))
{
calculate_dominance_info (CDI_DOMINATORS);
calculate_dominance_info (CDI_POST_DOMINATORS);
if (!early)
loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
else
{
ipa_check_create_node_params ();
ipa_initialize_node_params (node);
}
if (ipa_node_params_sum)
{
fbi.node = node;
fbi.info = IPA_NODE_REF (node);
fbi.bb_infos = vNULL;
fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
fbi.param_count = count_formal_params (node->decl);
fbi.aa_walk_budget = opt_for_fn (node->decl, param_ipa_max_aa_steps);
nonconstant_names.safe_grow_cleared
(SSANAMES (my_function)->length ());
}
}
if (dump_file)
fprintf (dump_file, "\nAnalyzing function body size: %s\n",
node->dump_name ());
/* When we run into the maximal number of entries, we assign everything to
the constant-truth case. Be sure to have it in the list. */
bb_predicate = true;
info->account_size_time (0, 0, bb_predicate, bb_predicate);
bb_predicate = predicate::not_inlined ();
info->account_size_time (opt_for_fn (node->decl,
param_uninlined_function_insns)
* ipa_fn_summary::size_scale,
opt_for_fn (node->decl,
param_uninlined_function_time),
bb_predicate,
bb_predicate);
if (fbi.info)
compute_bb_predicates (&fbi, node, info, params_summary);
order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
nblocks = pre_and_rev_post_order_compute (NULL, order, false);
for (n = 0; n < nblocks; n++)
{
bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
freq = bb->count.to_sreal_scale (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count);
if (clobber_only_eh_bb_p (bb))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\n Ignoring BB %i;"
" it will be optimized away by cleanup_clobbers\n",
bb->index);
continue;
}
/* TODO: Obviously predicates can be propagated down across CFG. */
if (fbi.info)
{
if (bb->aux)
bb_predicate = *(predicate *) bb->aux;
else
bb_predicate = false;
}
else
bb_predicate = true;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\n BB %i predicate:", bb->index);
bb_predicate.dump (dump_file, info->conds);
}
if (fbi.info && nonconstant_names.exists ())
{
predicate phi_predicate;
bool first_phi = true;
for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
gsi_next (&bsi))
{
if (first_phi
&& !phi_result_unknown_predicate (&fbi, info,
params_summary,
bb,
&phi_predicate,
nonconstant_names))
break;
first_phi = false;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " ");
print_gimple_stmt (dump_file, gsi_stmt (bsi), 0);
}
predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
nonconstant_names);
}
}
fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
for (gimple_stmt_iterator bsi = gsi_start_nondebug_bb (bb);
!gsi_end_p (bsi); gsi_next_nondebug (&bsi))
{
gimple *stmt = gsi_stmt (bsi);
int this_size = estimate_num_insns (stmt, &eni_size_weights);
int this_time = estimate_num_insns (stmt, &eni_time_weights);
int prob;
predicate will_be_nonconstant;
/* This relation stmt should be folded after we remove
__builtin_expect call. Adjust the cost here. */
if (stmt == fix_builtin_expect_stmt)
{
this_size--;
this_time--;
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " ");
print_gimple_stmt (dump_file, stmt, 0);
fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
freq.to_double (), this_size,
this_time);
}
if (is_gimple_call (stmt)
&& !gimple_call_internal_p (stmt))
{
struct cgraph_edge *edge = node->get_edge (stmt);
ipa_call_summary *es = ipa_call_summaries->get_create (edge);
/* Special case: results of BUILT_IN_CONSTANT_P will always be
resolved as constant. We, however, don't want to optimize out
the cgraph edges. */
if (nonconstant_names.exists ()
&& gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
&& gimple_call_lhs (stmt)
&& TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
{
predicate false_p = false;
nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
= false_p;
}
if (ipa_node_params_sum)
{
int count = gimple_call_num_args (stmt);
int i;
if (count)
es->param.safe_grow_cleared (count);
for (i = 0; i < count; i++)
{
int prob = param_change_prob (&fbi, stmt, i);
gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
es->param[i].change_prob = prob;
}
}
es->call_stmt_size = this_size;
es->call_stmt_time = this_time;
es->loop_depth = bb_loop_depth (bb);
edge_set_predicate (edge, &bb_predicate);
if (edge->speculative)
{
cgraph_edge *indirect
= edge->speculative_call_indirect_edge ();
ipa_call_summary *es2
= ipa_call_summaries->get_create (indirect);
ipa_call_summaries->duplicate (edge, indirect,
es, es2);
/* Edge is the first direct call.
create and duplicate call summaries for multiple
speculative call targets. */
for (cgraph_edge *direct
= edge->next_speculative_call_target ();
direct;
direct = direct->next_speculative_call_target ())
{
ipa_call_summary *es3
= ipa_call_summaries->get_create (direct);
ipa_call_summaries->duplicate (edge, direct,
es, es3);
}
}
}
/* TODO: When a conditional jump or switch is known to be constant, but
we did not translate it into the predicates, we could account just the
maximum of the possible paths. */
if (fbi.info)
will_be_nonconstant
= will_be_nonconstant_predicate (&fbi, info, params_summary,
stmt, nonconstant_names);
else
will_be_nonconstant = true;
if (this_time || this_size)
{
sreal final_time = (sreal)this_time * freq;
prob = eliminated_by_inlining_prob (&fbi, stmt);
if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"\t\t50%% will be eliminated by inlining\n");
if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
class predicate p = bb_predicate & will_be_nonconstant;
/* We can ignore a statement when we have proved it is never going to
be executed, but we cannot do that for call statements because edges
are accounted specially. */
if (*(is_gimple_call (stmt) ? &bb_predicate : &p) != false)
{
time += final_time;
size += this_size;
}
/* We account everything but the calls. Calls have their own
size/time info attached to cgraph edges. This is necessary
in order to make the cost disappear after inlining. */
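/* PROB is on a 0..2 scale (see the dumps above): 0 means the statement is
   never eliminated by inlining, 1 means eliminated with roughly 50%
   probability, 2 means always eliminated.  The two account_size_time
   calls below split the cost accordingly between the "not inlined"
   predicate and the plain block predicate.  */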
if (!is_gimple_call (stmt))
{
if (prob)
{
predicate ip = bb_predicate & predicate::not_inlined ();
info->account_size_time (this_size * prob,
(final_time * prob) / 2, ip,
p);
}
if (prob != 2)
info->account_size_time (this_size * (2 - prob),
(final_time * (2 - prob) / 2),
bb_predicate,
p);
}
if (!info->fp_expressions && fp_expression_p (stmt))
{
info->fp_expressions = true;
if (dump_file)
fprintf (dump_file, " fp_expression set\n");
}
}
/* Account cost of address calculations in the statements. */
for (unsigned int i = 0; i < gimple_num_ops (stmt); i++)
{
for (tree op = gimple_op (stmt, i);
op && handled_component_p (op);
op = TREE_OPERAND (op, 0))
if ((TREE_CODE (op) == ARRAY_REF
|| TREE_CODE (op) == ARRAY_RANGE_REF)
&& TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
{
predicate p = bb_predicate;
if (fbi.info)
p = p & will_be_nonconstant_expr_predicate
(&fbi, info, params_summary,
TREE_OPERAND (op, 1),
nonconstant_names);
if (p != false)
{
time += freq;
size += 1;
if (dump_file)
fprintf (dump_file,
"\t\tAccounting address calculation.\n");
info->account_size_time (ipa_fn_summary::size_scale,
freq,
bb_predicate,
p);
}
}
}
}
}
free (order);
if (nonconstant_names.exists () && !early)
{
class loop *loop;
predicate loop_iterations = true;
predicate loop_stride = true;
if (dump_file && (dump_flags & TDF_DETAILS))
flow_loops_dump (dump_file, NULL, 0);
scev_initialize ();
FOR_EACH_LOOP (loop, 0)
{
vec<edge> exits;
edge ex;
unsigned int j;
class tree_niter_desc niter_desc;
if (loop->header->aux)
bb_predicate = *(predicate *) loop->header->aux;
else
bb_predicate = false;
exits = get_loop_exit_edges (loop);
FOR_EACH_VEC_ELT (exits, j, ex)
if (number_of_iterations_exit (loop, ex, &niter_desc, false)
&& !is_gimple_min_invariant (niter_desc.niter))
{
predicate will_be_nonconstant
= will_be_nonconstant_expr_predicate (&fbi, info,
params_summary,
niter_desc.niter,
nonconstant_names);
if (will_be_nonconstant != true)
will_be_nonconstant = bb_predicate & will_be_nonconstant;
if (will_be_nonconstant != true
&& will_be_nonconstant != false)
/* This is slightly imprecise. We may want to represent each
loop with an independent predicate. */
loop_iterations &= will_be_nonconstant;
}
exits.release ();
}
/* To avoid quadratic behavior we analyze stride predicates only
with respect to the containing loop. Thus we simply iterate
over all defs in the outermost loop body. */
for (loop = loops_for_fn (cfun)->tree_root->inner;
loop != NULL; loop = loop->next)
{
basic_block *body = get_loop_body (loop);
for (unsigned i = 0; i < loop->num_nodes; i++)
{
gimple_stmt_iterator gsi;
if (body[i]->aux)
bb_predicate = *(predicate *) body[i]->aux;
else
bb_predicate = false;
for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
gsi_next (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (!is_gimple_assign (stmt))
continue;
tree def = gimple_assign_lhs (stmt);
if (TREE_CODE (def) != SSA_NAME)
continue;
affine_iv iv;
if (!simple_iv (loop_containing_stmt (stmt),
loop_containing_stmt (stmt),
def, &iv, true)
|| is_gimple_min_invariant (iv.step))
continue;
predicate will_be_nonconstant
= will_be_nonconstant_expr_predicate (&fbi, info,
params_summary,
iv.step,
nonconstant_names);
if (will_be_nonconstant != true)
will_be_nonconstant = bb_predicate & will_be_nonconstant;
if (will_be_nonconstant != true
&& will_be_nonconstant != false)
/* This is slightly imprecise. We may want to represent
each loop with an independent predicate. */
loop_stride = loop_stride & will_be_nonconstant;
}
}
free (body);
}
ipa_fn_summary *s = ipa_fn_summaries->get (node);
set_hint_predicate (&s->loop_iterations, loop_iterations);
set_hint_predicate (&s->loop_stride, loop_stride);
scev_finalize ();
}
FOR_ALL_BB_FN (bb, my_function)
{
edge e;
edge_iterator ei;
if (bb->aux)
edge_predicate_pool.remove ((predicate *)bb->aux);
bb->aux = NULL;
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e->aux)
edge_predicate_pool.remove ((predicate *) e->aux);
e->aux = NULL;
}
}
ipa_fn_summary *s = ipa_fn_summaries->get (node);
ipa_size_summary *ss = ipa_size_summaries->get (node);
s->time = time;
ss->self_size = size;
nonconstant_names.release ();
ipa_release_body_info (&fbi);
if (opt_for_fn (node->decl, optimize))
{
if (!early)
loop_optimizer_finalize ();
else if (!ipa_edge_args_sum)
ipa_free_all_node_params ();
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
}
if (dump_file)
{
fprintf (dump_file, "\n");
ipa_dump_fn_summary (dump_file, node);
}
}
/* Compute function summary.
EARLY is true when we compute parameters during early opts. */
void
compute_fn_summary (struct cgraph_node *node, bool early)
{
HOST_WIDE_INT self_stack_size;
struct cgraph_edge *e;
gcc_assert (!node->inlined_to);
if (!ipa_fn_summaries)
ipa_fn_summary_alloc ();
/* Create a new ipa_fn_summary. */
((ipa_fn_summary_t *)ipa_fn_summaries)->remove_callees (node);
ipa_fn_summaries->remove (node);
class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
class ipa_size_summary *size_info = ipa_size_summaries->get_create (node);
/* Estimate the stack size for the function if we're optimizing. */
self_stack_size = optimize && !node->thunk.thunk_p
? estimated_stack_frame_size (node) : 0;
size_info->estimated_self_stack_size = self_stack_size;
info->estimated_stack_size = self_stack_size;
if (node->thunk.thunk_p)
{
ipa_call_summary *es = ipa_call_summaries->get_create (node->callees);
predicate t = true;
node->can_change_signature = false;
es->call_stmt_size = eni_size_weights.call_cost;
es->call_stmt_time = eni_time_weights.call_cost;
info->account_size_time (ipa_fn_summary::size_scale
* opt_for_fn (node->decl,
param_uninlined_function_thunk_insns),
opt_for_fn (node->decl,
param_uninlined_function_thunk_time), t, t);
t = predicate::not_inlined ();
info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
ipa_update_overall_fn_summary (node);
size_info->self_size = size_info->size;
if (stdarg_p (TREE_TYPE (node->decl)))
{
info->inlinable = false;
node->callees->inline_failed = CIF_VARIADIC_THUNK;
}
else
info->inlinable = true;
}
else
{
/* Even is_gimple_min_invariant relies on current_function_decl. */
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
/* During IPA profile merging we may be called w/o virtual SSA form
built. */
update_ssa (TODO_update_ssa_only_virtuals);
/* Can this function be inlined at all? */
if (!opt_for_fn (node->decl, optimize)
&& !lookup_attribute ("always_inline",
DECL_ATTRIBUTES (node->decl)))
info->inlinable = false;
else
info->inlinable = tree_inlinable_function_p (node->decl);
/* Type attributes can use parameter indices to describe them. */
if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl))
/* Likewise for #pragma omp declare simd functions or functions
with simd attribute. */
|| lookup_attribute ("omp declare simd",
DECL_ATTRIBUTES (node->decl)))
node->can_change_signature = false;
else
{
/* Otherwise, inlinable functions always can change signature. */
if (info->inlinable)
node->can_change_signature = true;
else
{
/* Functions calling builtin_apply cannot change signature. */
for (e = node->callees; e; e = e->next_callee)
{
tree cdecl = e->callee->decl;
if (fndecl_built_in_p (cdecl, BUILT_IN_APPLY_ARGS)
|| fndecl_built_in_p (cdecl, BUILT_IN_VA_START))
break;
}
node->can_change_signature = !e;
}
}
analyze_function_body (node, early);
pop_cfun ();
}
/* Inlining characteristics are maintained by the cgraph_mark_inline. */
size_info->size = size_info->self_size;
info->estimated_stack_size = size_info->estimated_self_stack_size;
/* The code above should compute exactly the same result as
ipa_update_overall_fn_summary, except when speculative edges are
present, since these are accounted to size but not self_size. Do not
compare time: with a different evaluation order the roundoff errors
result in slight changes. */
ipa_update_overall_fn_summary (node);
if (flag_checking)
{
for (e = node->indirect_calls; e; e = e->next_callee)
if (e->speculative)
break;
gcc_assert (e || size_info->size == size_info->self_size);
}
}
/* Compute parameters of functions used by inliner using
current_function_decl. */
static unsigned int
compute_fn_summary_for_current (void)
{
compute_fn_summary (cgraph_node::get (current_function_decl), true);
return 0;
}
/* Estimate the benefit of devirtualizing indirect edge IE, given
KNOWN_VALS, KNOWN_CONTEXTS and KNOWN_AGGS. */
static bool
estimate_edge_devirt_benefit (struct cgraph_edge *ie,
int *size, int *time,
vec<tree> known_vals,
vec<ipa_polymorphic_call_context> known_contexts,
vec<ipa_agg_value_set> known_aggs)
{
tree target;
struct cgraph_node *callee;
class ipa_fn_summary *isummary;
enum availability avail;
bool speculative;
if (!known_vals.length () && !known_contexts.length ())
return false;
if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
return false;
target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
known_aggs, &speculative);
if (!target || speculative)
return false;
/* Account for difference in cost between indirect and direct calls. */
*size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
*time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
gcc_checking_assert (*time >= 0);
gcc_checking_assert (*size >= 0);
callee = cgraph_node::get (target);
if (!callee || !callee->definition)
return false;
callee = callee->function_symbol (&avail);
if (avail < AVAIL_AVAILABLE)
return false;
isummary = ipa_fn_summaries->get (callee);
if (isummary == NULL)
return false;
return isummary->inlinable;
}
/* Increase SIZE, MIN_SIZE (if non-NULL) and TIME by the size and time needed
to handle edge E.
Set HINTS if the edge may be devirtualized.
KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe the context of the call
site. */
static inline void
estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
sreal *time,
vec<tree> known_vals,
vec<ipa_polymorphic_call_context> known_contexts,
vec<ipa_agg_value_set> known_aggs,
ipa_hints *hints)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
int call_size = es->call_stmt_size;
int call_time = es->call_stmt_time;
int cur_size;
if (!e->callee && hints && e->maybe_hot_p ()
&& estimate_edge_devirt_benefit (e, &call_size, &call_time,
known_vals, known_contexts, known_aggs))
*hints |= INLINE_HINT_indirect_call;
cur_size = call_size * ipa_fn_summary::size_scale;
*size += cur_size;
if (min_size)
*min_size += cur_size;
if (time)
*time += ((sreal)call_time) * e->sreal_frequency ();
}
/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
describe context of the call site.
Helper for estimate_calls_size_and_time which does the same but
(in most cases) faster. */
static void
estimate_calls_size_and_time_1 (struct cgraph_node *node, int *size,
int *min_size, sreal *time,
ipa_hints *hints,
clause_t possible_truths,
vec<tree> known_vals,
vec<ipa_polymorphic_call_context> known_contexts,
vec<ipa_agg_value_set> known_aggs)
{
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
{
if (!e->inline_failed)
{
gcc_checking_assert (!ipa_call_summaries->get (e));
estimate_calls_size_and_time_1 (e->callee, size, min_size, time,
hints,
possible_truths,
known_vals, known_contexts,
known_aggs);
continue;
}
class ipa_call_summary *es = ipa_call_summaries->get (e);
/* Do not care about zero sized builtins. */
if (!es->call_stmt_size)
{
gcc_checking_assert (!es->call_stmt_time);
continue;
}
if (!es->predicate
|| es->predicate->evaluate (possible_truths))
{
/* Predicates of calls shall not use NOT_CHANGED codes,
so we do not need to compute probabilities. */
estimate_edge_size_and_time (e, size,
es->predicate ? NULL : min_size,
time,
known_vals, known_contexts,
known_aggs, hints);
}
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
if (!es->predicate
|| es->predicate->evaluate (possible_truths))
estimate_edge_size_and_time (e, size,
es->predicate ? NULL : min_size,
time,
known_vals, known_contexts, known_aggs,
hints);
}
}
/* Populate sum->call_size_time_table for edges from NODE. */
static void
summarize_calls_size_and_time (struct cgraph_node *node,
ipa_fn_summary *sum)
{
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
{
if (!e->inline_failed)
{
gcc_checking_assert (!ipa_call_summaries->get (e));
summarize_calls_size_and_time (e->callee, sum);
continue;
}
int size = 0;
sreal time = 0;
estimate_edge_size_and_time (e, &size, NULL, &time,
vNULL, vNULL, vNULL, NULL);
struct predicate pred = true;
class ipa_call_summary *es = ipa_call_summaries->get (e);
if (es->predicate)
pred = *es->predicate;
sum->account_size_time (size, time, pred, pred, true);
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
int size = 0;
sreal time = 0;
estimate_edge_size_and_time (e, &size, NULL, &time,
vNULL, vNULL, vNULL, NULL);
struct predicate pred = true;
class ipa_call_summary *es = ipa_call_summaries->get (e);
if (es->predicate)
pred = *es->predicate;
sum->account_size_time (size, time, pred, pred, true);
}
}
/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
describe context of the call site. */
static void
estimate_calls_size_and_time (struct cgraph_node *node, int *size,
int *min_size, sreal *time,
ipa_hints *hints,
clause_t possible_truths,
vec<tree> known_vals,
vec<ipa_polymorphic_call_context> known_contexts,
vec<ipa_agg_value_set> known_aggs)
{
class ipa_fn_summary *sum = ipa_fn_summaries->get (node);
bool use_table = true;
gcc_assert (node->callees || node->indirect_calls);
/* During early inlining we do not calculate info for very
large functions and thus there is no need for producing
summaries. */
if (!ipa_node_params_sum)
use_table = false;
/* Do not calculate summaries for simple wrappers; it is a waste
of memory. */
else if (node->callees && node->indirect_calls
&& node->callees->inline_failed && !node->callees->next_callee)
use_table = false;
/* If there is an indirect edge that may be optimized, we need
to go the slow way. */
else if ((known_vals.length ()
|| known_contexts.length ()
|| known_aggs.length ()) && hints)
{
class ipa_node_params *params_summary = IPA_NODE_REF (node);
unsigned int nargs = params_summary
? ipa_get_param_count (params_summary) : 0;
for (unsigned int i = 0; i < nargs && use_table; i++)
{
if (ipa_is_param_used_by_indirect_call (params_summary, i)
&& ((known_vals.length () > i && known_vals[i])
|| (known_aggs.length () > i
&& known_aggs[i].items.length ())))
use_table = false;
else if (ipa_is_param_used_by_polymorphic_call (params_summary, i)
&& (known_contexts.length () > i
&& !known_contexts[i].useless_p ()))
use_table = false;
}
}
/* Fast path is via the call size time table. */
if (use_table)
{
/* Build summary if it is absent. */
if (!sum->call_size_time_table)
{
predicate true_pred = true;
sum->account_size_time (0, 0, true_pred, true_pred, true);
summarize_calls_size_and_time (node, sum);
}
int old_size = *size;
sreal old_time = time ? *time : 0;
if (min_size)
*min_size += (*sum->call_size_time_table)[0].size;
unsigned int i;
size_time_entry *e;
/* Walk the table and account sizes and times. */
for (i = 0; vec_safe_iterate (sum->call_size_time_table, i, &e);
i++)
if (e->exec_predicate.evaluate (possible_truths))
{
*size += e->size;
if (time)
*time += e->time;
}
/* Be careful and see if both methods agree. */
if ((flag_checking || dump_file)
/* Do not try to sanity check when we know we lost some
precision. */
&& sum->call_size_time_table->length ()
< ipa_fn_summary::max_size_time_table_size)
{
estimate_calls_size_and_time_1 (node, &old_size, NULL, &old_time, NULL,
possible_truths, known_vals,
known_contexts, known_aggs);
gcc_assert (*size == old_size);
if (time && (*time - old_time > 1 || *time - old_time < -1)
&& dump_file)
fprintf (dump_file, "Time mismatch in call summary %f!=%f\n",
old_time.to_double (),
time->to_double ());
}
}
/* Slow path by walking all edges. */
else
estimate_calls_size_and_time_1 (node, size, min_size, time, hints,
possible_truths, known_vals, known_contexts,
known_aggs);
}
/* Default constructor for ipa call context.
Memory allocation of the known_vals, known_contexts and known_aggs
vectors is owned by the caller, but can be released by
ipa_call_context::release.
inline_param_summary is owned by the caller. */
ipa_call_context::ipa_call_context (cgraph_node *node,
clause_t possible_truths,
clause_t nonspec_possible_truths,
vec<tree> known_vals,
vec<ipa_polymorphic_call_context>
known_contexts,
vec<ipa_agg_value_set> known_aggs,
vec<inline_param_summary>
inline_param_summary)
: m_node (node), m_possible_truths (possible_truths),
m_nonspec_possible_truths (nonspec_possible_truths),
m_inline_param_summary (inline_param_summary),
m_known_vals (known_vals),
m_known_contexts (known_contexts),
m_known_aggs (known_aggs)
{
}
/* Set THIS to be a duplicate of CTX. Copy all relevant info. */
void
ipa_call_context::duplicate_from (const ipa_call_context &ctx)
{
m_node = ctx.m_node;
m_possible_truths = ctx.m_possible_truths;
m_nonspec_possible_truths = ctx.m_nonspec_possible_truths;
class ipa_node_params *params_summary = IPA_NODE_REF (m_node);
unsigned int nargs = params_summary
? ipa_get_param_count (params_summary) : 0;
m_inline_param_summary = vNULL;
/* Copy the info only if there is at least one useful entry. */
if (ctx.m_inline_param_summary.exists ())
{
unsigned int n = MIN (ctx.m_inline_param_summary.length (), nargs);
for (unsigned int i = 0; i < n; i++)
if (ipa_is_param_used_by_ipa_predicates (params_summary, i)
&& !ctx.m_inline_param_summary[i].useless_p ())
{
m_inline_param_summary
= ctx.m_inline_param_summary.copy ();
break;
}
}
m_known_vals = vNULL;
if (ctx.m_known_vals.exists ())
{
unsigned int n = MIN (ctx.m_known_vals.length (), nargs);
for (unsigned int i = 0; i < n; i++)
if (ipa_is_param_used_by_indirect_call (params_summary, i)
&& ctx.m_known_vals[i])
{
m_known_vals = ctx.m_known_vals.copy ();
break;
}
}
m_known_contexts = vNULL;
if (ctx.m_known_contexts.exists ())
{
unsigned int n = MIN (ctx.m_known_contexts.length (), nargs);
for (unsigned int i = 0; i < n; i++)
if (ipa_is_param_used_by_polymorphic_call (params_summary, i)
&& !ctx.m_known_contexts[i].useless_p ())
{
m_known_contexts = ctx.m_known_contexts.copy ();
break;
}
}
m_known_aggs = vNULL;
if (ctx.m_known_aggs.exists ())
{
unsigned int n = MIN (ctx.m_known_aggs.length (), nargs);
for (unsigned int i = 0; i < n; i++)
if (ipa_is_param_used_by_indirect_call (params_summary, i)
&& !ctx.m_known_aggs[i].is_empty ())
{
m_known_aggs = ipa_copy_agg_values (ctx.m_known_aggs);
break;
}
}
}
/* Release memory used by the known_vals/contexts/aggs vectors.
If ALL is true, also release inline_param_summary.
This happens when the context was previously duplicated to be stored
in the cache. */
void
ipa_call_context::release (bool all)
{
/* See if the context was initialized in the first place. */
if (!m_node)
return;
ipa_release_agg_values (m_known_aggs, all);
if (all)
{
m_known_vals.release ();
m_known_contexts.release ();
m_inline_param_summary.release ();
}
}
/* Return true if CTX describes the same call context as THIS. */
bool
ipa_call_context::equal_to (const ipa_call_context &ctx)
{
if (m_node != ctx.m_node
|| m_possible_truths != ctx.m_possible_truths
|| m_nonspec_possible_truths != ctx.m_nonspec_possible_truths)
return false;
class ipa_node_params *params_summary = IPA_NODE_REF (m_node);
unsigned int nargs = params_summary
? ipa_get_param_count (params_summary) : 0;
if (m_inline_param_summary.exists () || ctx.m_inline_param_summary.exists ())
{
for (unsigned int i = 0; i < nargs; i++)
{
if (!ipa_is_param_used_by_ipa_predicates (params_summary, i))
continue;
if (i >= m_inline_param_summary.length ()
|| m_inline_param_summary[i].useless_p ())
{
if (i < ctx.m_inline_param_summary.length ()
&& !ctx.m_inline_param_summary[i].useless_p ())
return false;
continue;
}
if (i >= ctx.m_inline_param_summary.length ()
|| ctx.m_inline_param_summary[i].useless_p ())
{
if (i < m_inline_param_summary.length ()
&& !m_inline_param_summary[i].useless_p ())
return false;
continue;
}
if (!m_inline_param_summary[i].equal_to
(ctx.m_inline_param_summary[i]))
return false;
}
}
if (m_known_vals.exists () || ctx.m_known_vals.exists ())
{
for (unsigned int i = 0; i < nargs; i++)
{
if (!ipa_is_param_used_by_indirect_call (params_summary, i))
continue;
if (i >= m_known_vals.length () || !m_known_vals[i])
{
if (i < ctx.m_known_vals.length () && ctx.m_known_vals[i])
return false;
continue;
}
if (i >= ctx.m_known_vals.length () || !ctx.m_known_vals[i])
{
if (i < m_known_vals.length () && m_known_vals[i])
return false;
continue;
}
if (m_known_vals[i] != ctx.m_known_vals[i])
return false;
}
}
if (m_known_contexts.exists () || ctx.m_known_contexts.exists ())
{
for (unsigned int i = 0; i < nargs; i++)
{
if (!ipa_is_param_used_by_polymorphic_call (params_summary, i))
continue;
if (i >= m_known_contexts.length ()
|| m_known_contexts[i].useless_p ())
{
if (i < ctx.m_known_contexts.length ()
&& !ctx.m_known_contexts[i].useless_p ())
return false;
continue;
}
if (i >= ctx.m_known_contexts.length ()
|| ctx.m_known_contexts[i].useless_p ())
{
if (i < m_known_contexts.length ()
&& !m_known_contexts[i].useless_p ())
return false;
continue;
}
if (!m_known_contexts[i].equal_to
(ctx.m_known_contexts[i]))
return false;
}
}
if (m_known_aggs.exists () || ctx.m_known_aggs.exists ())
{
for (unsigned int i = 0; i < nargs; i++)
{
if (!ipa_is_param_used_by_indirect_call (params_summary, i))
continue;
if (i >= m_known_aggs.length () || m_known_aggs[i].is_empty ())
{
if (i < ctx.m_known_aggs.length ()
&& !ctx.m_known_aggs[i].is_empty ())
return false;
continue;
}
if (i >= ctx.m_known_aggs.length ()
|| ctx.m_known_aggs[i].is_empty ())
{
if (i < m_known_aggs.length ()
&& !m_known_aggs[i].is_empty ())
return false;
continue;
}
if (!m_known_aggs[i].equal_to (ctx.m_known_aggs[i]))
return false;
}
}
return true;
}
/* Estimate the size and time needed to execute the call in the given
context. Additionally compute the hints determined by the context.
Finally compute the minimal size needed for the call that is independent
of the call context and can be used for fast estimates. Return the
values in RET_SIZE, RET_MIN_SIZE, RET_TIME and RET_HINTS. */
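/* Informally, the loop below walks size_time_table and accumulates the
   size and time of every entry whose predicates evaluate to true under
   the given clauses; sizes are kept scaled by ipa_fn_summary::size_scale
   and divided back just before returning.  */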
void
ipa_call_context::estimate_size_and_time (int *ret_size,
int *ret_min_size,
sreal *ret_time,
sreal *ret_nonspecialized_time,
ipa_hints *ret_hints)
{
class ipa_fn_summary *info = ipa_fn_summaries->get (m_node);
size_time_entry *e;
int size = 0;
sreal time = 0;
int min_size = 0;
ipa_hints hints = 0;
int i;
if (dump_file && (dump_flags & TDF_DETAILS))
{
bool found = false;
fprintf (dump_file, " Estimating body: %s\n"
" Known to be false: ", m_node->dump_name ());
for (i = predicate::not_inlined_condition;
i < (predicate::first_dynamic_condition
+ (int) vec_safe_length (info->conds)); i++)
if (!(m_possible_truths & (1 << i)))
{
if (found)
fprintf (dump_file, ", ");
found = true;
dump_condition (dump_file, info->conds, i);
}
}
if (m_node->callees || m_node->indirect_calls)
estimate_calls_size_and_time (m_node, &size, &min_size,
ret_time ? &time : NULL,
ret_hints ? &hints : NULL, m_possible_truths,
m_known_vals, m_known_contexts, m_known_aggs);
sreal nonspecialized_time = time;
min_size += (*info->size_time_table)[0].size;
for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
{
bool exec = e->exec_predicate.evaluate (m_nonspec_possible_truths);
/* Because predicates are conservative, it can happen that nonconst is 1
but exec is 0. */
if (exec)
{
bool nonconst = e->nonconst_predicate.evaluate (m_possible_truths);
gcc_checking_assert (e->time >= 0);
gcc_checking_assert (time >= 0);
/* We compute the specialized size only, because the size of the
nonspecialized copy is context independent.
The difference between nonspecialized and specialized execution is
that the nonspecialized copy does not get to optimize out computations
known to be constant in the specialized setting. */
if (nonconst)
size += e->size;
if (!ret_time)
continue;
nonspecialized_time += e->time;
if (!nonconst)
;
else if (!m_inline_param_summary.exists ())
{
if (nonconst)
time += e->time;
}
else
{
int prob = e->nonconst_predicate.probability
(info->conds, m_possible_truths,
m_inline_param_summary);
gcc_checking_assert (prob >= 0);
gcc_checking_assert (prob <= REG_BR_PROB_BASE);
if (prob == REG_BR_PROB_BASE)
time += e->time;
else
time += e->time * prob / REG_BR_PROB_BASE;
}
gcc_checking_assert (time >= 0);
}
}
gcc_checking_assert ((*info->size_time_table)[0].exec_predicate == true);
gcc_checking_assert ((*info->size_time_table)[0].nonconst_predicate == true);
gcc_checking_assert (min_size >= 0);
gcc_checking_assert (size >= 0);
gcc_checking_assert (time >= 0);
/* nonspecialized_time should always be at least as large as the
specialized time. Roundoff issues, however, may get in the way. */
gcc_checking_assert ((nonspecialized_time - time * 99 / 100) >= -1);
/* Roundoff issues may make specialized time bigger than nonspecialized
time. We do not really want that to happen because some heuristics
may get confused by seeing negative speedups. */
if (time > nonspecialized_time)
time = nonspecialized_time;
if (ret_hints)
{
if (info->loop_iterations
&& !info->loop_iterations->evaluate (m_possible_truths))
hints |= INLINE_HINT_loop_iterations;
if (info->loop_stride
&& !info->loop_stride->evaluate (m_possible_truths))
hints |= INLINE_HINT_loop_stride;
if (info->scc_no)
hints |= INLINE_HINT_in_scc;
if (DECL_DECLARED_INLINE_P (m_node->decl))
hints |= INLINE_HINT_declared_inline;
}
size = RDIV (size, ipa_fn_summary::size_scale);
min_size = RDIV (min_size, ipa_fn_summary::size_scale);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\n size:%i time:%f nonspec time:%f\n", (int) size,
time.to_double (), nonspecialized_time.to_double ());
if (ret_time)
*ret_time = time;
if (ret_nonspecialized_time)
*ret_nonspecialized_time = nonspecialized_time;
if (ret_size)
*ret_size = size;
if (ret_min_size)
*ret_min_size = min_size;
if (ret_hints)
*ret_hints = hints;
return;
}
/* Estimate the size and time needed to execute NODE, assuming that the
parameters known to be constant at its call sites are propagated.
KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
and types for the parameters. */
void
estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
vec<tree> known_vals,
vec<ipa_polymorphic_call_context>
known_contexts,
vec<ipa_agg_value_set> known_aggs,
int *ret_size, sreal *ret_time,
sreal *ret_nonspec_time,
ipa_hints *hints)
{
clause_t clause, nonspec_clause;
/* TODO: Also pass known value ranges. */
evaluate_conditions_for_known_args (node, false, known_vals, vNULL,
known_aggs, &clause, &nonspec_clause);
ipa_call_context ctx (node, clause, nonspec_clause,
known_vals, known_contexts,
known_aggs, vNULL);
ctx.estimate_size_and_time (ret_size, NULL, ret_time,
ret_nonspec_time, hints);
}
/* Return stack frame offset where frame of NODE is supposed to start inside
of the function it is inlined to.
Return 0 for functions that are not inlined. */
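/* The offset is simply the sum of the estimated self stack sizes of all
   functions on the inline chain above NODE, walked through
   node->callers->caller (unique for inline clones), up to and including
   the function the chain was ultimately inlined into.  */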
HOST_WIDE_INT
ipa_get_stack_frame_offset (struct cgraph_node *node)
{
HOST_WIDE_INT offset = 0;
if (!node->inlined_to)
return 0;
node = node->callers->caller;
while (true)
{
offset += ipa_size_summaries->get (node)->estimated_self_stack_size;
if (!node->inlined_to)
return offset;
node = node->callers->caller;
}
}
/* Update summary information of inline clones after inlining.
Compute peak stack usage. */
static void
inline_update_callee_summaries (struct cgraph_node *node, int depth)
{
struct cgraph_edge *e;
ipa_propagate_frequency (node);
for (e = node->callees; e; e = e->next_callee)
{
if (!e->inline_failed)
inline_update_callee_summaries (e->callee, depth);
else
ipa_call_summaries->get (e)->loop_depth += depth;
}
for (e = node->indirect_calls; e; e = e->next_callee)
ipa_call_summaries->get (e)->loop_depth += depth;
}
/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
When function A is inlined into B and A calls C with a parameter that
changes with probability PROB1, and that parameter is known to be a
passthrough of an argument of B that changes with probability PROB2,
the probability of change is now PROB1*PROB2. */
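/* For example, with REG_BR_PROB_BASE = 10000, PROB1 = 5000 (50%) and
   PROB2 = 2000 (20%) combine to 1000 (10%); combine_probabilities is
   expected to compute roughly PROB1 * PROB2 / REG_BR_PROB_BASE, and the
   code below avoids rounding a nonzero product down to zero.  */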
static void
remap_edge_change_prob (struct cgraph_edge *inlined_edge,
struct cgraph_edge *edge)
{
if (ipa_node_params_sum)
{
int i;
class ipa_edge_args *args = IPA_EDGE_REF (edge);
if (!args)
return;
class ipa_call_summary *es = ipa_call_summaries->get (edge);
class ipa_call_summary *inlined_es
= ipa_call_summaries->get (inlined_edge);
if (es->param.length () == 0)
return;
for (i = 0; i < ipa_get_cs_argument_count (args); i++)
{
struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
int id = jfunc->type == IPA_JF_PASS_THROUGH
? ipa_get_jf_pass_through_formal_id (jfunc)
: ipa_get_jf_ancestor_formal_id (jfunc);
if (id < (int) inlined_es->param.length ())
{
int prob1 = es->param[i].change_prob;
int prob2 = inlined_es->param[id].change_prob;
int prob = combine_probabilities (prob1, prob2);
if (prob1 && prob2 && !prob)
prob = 1;
es->param[i].change_prob = prob;
}
}
}
}
}
/* Update edge summaries of NODE after INLINED_EDGE has been inlined.
Remap predicates of callees of NODE. Rest of arguments match
remap_predicate.
Also update change probabilities. */
static void
remap_edge_summaries (struct cgraph_edge *inlined_edge,
struct cgraph_node *node,
class ipa_fn_summary *info,
class ipa_node_params *params_summary,
class ipa_fn_summary *callee_info,
vec<int> operand_map,
vec<int> offset_map,
clause_t possible_truths,
predicate *toplev_predicate)
{
struct cgraph_edge *e, *next;
for (e = node->callees; e; e = next)
{
predicate p;
next = e->next_callee;
if (e->inline_failed)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
remap_edge_change_prob (inlined_edge, e);
if (es->predicate)
{
p = es->predicate->remap_after_inlining
(info, params_summary,
callee_info, operand_map,
offset_map, possible_truths,
*toplev_predicate);
edge_set_predicate (e, &p);
}
else
edge_set_predicate (e, toplev_predicate);
}
else
remap_edge_summaries (inlined_edge, e->callee, info,
params_summary, callee_info,
operand_map, offset_map, possible_truths,
toplev_predicate);
}
for (e = node->indirect_calls; e; e = next)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
predicate p;
next = e->next_callee;
remap_edge_change_prob (inlined_edge, e);
if (es->predicate)
{
p = es->predicate->remap_after_inlining
(info, params_summary,
callee_info, operand_map, offset_map,
possible_truths, *toplev_predicate);
edge_set_predicate (e, &p);
}
else
edge_set_predicate (e, toplev_predicate);
}
}
/* Same as remap_predicate, but set result into hint *HINT. */
static void
remap_hint_predicate (class ipa_fn_summary *info,
class ipa_node_params *params_summary,
class ipa_fn_summary *callee_info,
predicate **hint,
vec<int> operand_map,
vec<int> offset_map,
clause_t possible_truths,
predicate *toplev_predicate)
{
predicate p;
if (!*hint)
return;
p = (*hint)->remap_after_inlining
(info, params_summary, callee_info,
operand_map, offset_map,
possible_truths, *toplev_predicate);
if (p != false && p != true)
{
if (!*hint)
set_hint_predicate (hint, p);
else
**hint &= p;
}
}
/* We inlined EDGE. Update summary of the function we inlined into. */
void
ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
{
ipa_fn_summary *callee_info = ipa_fn_summaries->get (edge->callee);
struct cgraph_node *to = (edge->caller->inlined_to
? edge->caller->inlined_to : edge->caller);
class ipa_fn_summary *info = ipa_fn_summaries->get (to);
clause_t clause = 0; /* not_inline is known to be false. */
size_time_entry *e;
auto_vec<int, 8> operand_map;
auto_vec<int, 8> offset_map;
int i;
predicate toplev_predicate;
class ipa_call_summary *es = ipa_call_summaries->get (edge);
class ipa_node_params *params_summary = (ipa_node_params_sum
? IPA_NODE_REF (to) : NULL);
if (es->predicate)
toplev_predicate = *es->predicate;
else
toplev_predicate = true;
info->fp_expressions |= callee_info->fp_expressions;
if (callee_info->conds)
{
auto_vec<tree, 32> known_vals;
auto_vec<ipa_agg_value_set, 32> known_aggs;
evaluate_properties_for_edge (edge, true, &clause, NULL,
&known_vals, NULL, &known_aggs);
}
if (ipa_node_params_sum && callee_info->conds)
{
class ipa_edge_args *args = IPA_EDGE_REF (edge);
int count = args ? ipa_get_cs_argument_count (args) : 0;
int i;
if (count)
{
operand_map.safe_grow_cleared (count);
offset_map.safe_grow_cleared (count);
}
for (i = 0; i < count; i++)
{
struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
int map = -1;
/* TODO: handle non-NOPs when merging. */
if (jfunc->type == IPA_JF_PASS_THROUGH)
{
if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
map = ipa_get_jf_pass_through_formal_id (jfunc);
if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
offset_map[i] = -1;
}
else if (jfunc->type == IPA_JF_ANCESTOR)
{
HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
if (offset >= 0 && offset < INT_MAX)
{
map = ipa_get_jf_ancestor_formal_id (jfunc);
if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
offset = -1;
offset_map[i] = offset;
}
}
operand_map[i] = map;
gcc_assert (map < ipa_get_param_count (params_summary));
}
}
sreal freq = edge->sreal_frequency ();
for (i = 0; vec_safe_iterate (callee_info->size_time_table, i, &e); i++)
{
predicate p;
p = e->exec_predicate.remap_after_inlining
(info, params_summary,
callee_info, operand_map,
offset_map, clause,
toplev_predicate);
predicate nonconstp;
nonconstp = e->nonconst_predicate.remap_after_inlining
(info, params_summary,
callee_info, operand_map,
offset_map, clause,
toplev_predicate);
if (p != false && nonconstp != false)
{
sreal add_time = ((sreal)e->time * freq);
int prob = e->nonconst_predicate.probability (callee_info->conds,
clause, es->param);
if (prob != REG_BR_PROB_BASE)
add_time = add_time * prob / REG_BR_PROB_BASE;
if (prob != REG_BR_PROB_BASE
&& dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\t\tScaling time by probability:%f\n",
(double) prob / REG_BR_PROB_BASE);
}
info->account_size_time (e->size, add_time, p, nonconstp);
}
}
remap_edge_summaries (edge, edge->callee, info, params_summary,
callee_info, operand_map,
offset_map, clause, &toplev_predicate);
remap_hint_predicate (info, params_summary, callee_info,
&callee_info->loop_iterations,
operand_map, offset_map, clause, &toplev_predicate);
remap_hint_predicate (info, params_summary, callee_info,
&callee_info->loop_stride,
operand_map, offset_map, clause, &toplev_predicate);
HOST_WIDE_INT stack_frame_offset = ipa_get_stack_frame_offset (edge->callee);
HOST_WIDE_INT peak = stack_frame_offset + callee_info->estimated_stack_size;
if (info->estimated_stack_size < peak)
info->estimated_stack_size = peak;
inline_update_callee_summaries (edge->callee, es->loop_depth);
if (info->call_size_time_table)
{
int edge_size = 0;
sreal edge_time = 0;
estimate_edge_size_and_time (edge, &edge_size, NULL, &edge_time, vNULL,
vNULL, vNULL, 0);
/* Unaccount size and time of the optimized out call. */
info->account_size_time (-edge_size, -edge_time,
es->predicate ? *es->predicate : true,
es->predicate ? *es->predicate : true,
true);
/* Account new calls. */
summarize_calls_size_and_time (edge->callee, info);
}
/* Free summaries that are not maintained for inline clones/edges. */
ipa_call_summaries->remove (edge);
ipa_fn_summaries->remove (edge->callee);
ipa_remove_from_growth_caches (edge);
}
/* For performance reasons ipa_merge_fn_summary_after_inlining does not update
the overall size and time. Recompute them here.
If RESET is true, also recompute call_size_time_table. */
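/* Note that size_time_table entries are kept in ipa_fn_summary::size_scale
   units; the RDIV calls at the end convert the accumulated size and
   min_size back to unscaled units.  */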
void
ipa_update_overall_fn_summary (struct cgraph_node *node, bool reset)
{
class ipa_fn_summary *info = ipa_fn_summaries->get (node);
class ipa_size_summary *size_info = ipa_size_summaries->get (node);
size_time_entry *e;
int i;
size_info->size = 0;
info->time = 0;
for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
{
size_info->size += e->size;
info->time += e->time;
}
info->min_size = (*info->size_time_table)[0].size;
if (reset)
vec_free (info->call_size_time_table);
if (node->callees || node->indirect_calls)
estimate_calls_size_and_time (node, &size_info->size, &info->min_size,
&info->time, NULL,
~(clause_t) (1 << predicate::false_condition),
vNULL, vNULL, vNULL);
size_info->size = RDIV (size_info->size, ipa_fn_summary::size_scale);
info->min_size = RDIV (info->min_size, ipa_fn_summary::size_scale);
}
/* This function performs intraprocedural analysis in NODE that is required to
inline indirect calls. */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
ipa_analyze_node (node);
if (dump_file && (dump_flags & TDF_DETAILS))
{
ipa_print_node_params (dump_file, node);
ipa_print_node_jump_functions (dump_file, node);
}
}
/* Note function body size. */
void
inline_analyze_function (struct cgraph_node *node)
{
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
if (dump_file)
fprintf (dump_file, "\nAnalyzing function: %s\n", node->dump_name ());
if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
inline_indirect_intraprocedural_analysis (node);
compute_fn_summary (node, false);
if (!optimize)
{
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
for (e = node->indirect_calls; e; e = e->next_callee)
e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
}
pop_cfun ();
}
/* Called when new function is inserted to callgraph late. */
void
ipa_fn_summary_t::insert (struct cgraph_node *node, ipa_fn_summary *)
{
inline_analyze_function (node);
}
/* Compute function summaries for all defined functions. */
static void
ipa_fn_summary_generate (void)
{
struct cgraph_node *node;
FOR_EACH_DEFINED_FUNCTION (node)
if (DECL_STRUCT_FUNCTION (node->decl))
node->versionable = tree_versionable_function_p (node->decl);
ipa_fn_summary_alloc ();
ipa_fn_summaries->enable_insertion_hook ();
ipa_register_cgraph_hooks ();
FOR_EACH_DEFINED_FUNCTION (node)
if (!node->alias
&& (flag_generate_lto || flag_generate_offload|| flag_wpa
|| opt_for_fn (node->decl, optimize)))
inline_analyze_function (node);
}
/* Read inline summary for edge E from IB. */
static void
read_ipa_call_summary (class lto_input_block *ib, struct cgraph_edge *e,
bool prevails)
{
class ipa_call_summary *es = prevails
? ipa_call_summaries->get_create (e) : NULL;
predicate p;
int length, i;
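/* The stream layout below mirrors write_ipa_call_summary: call statement
size, call statement time, loop depth, a one-bit bitpack carrying
is_return_callee_uncaptured, the edge predicate, and finally the parameter
count followed by one change probability per parameter. */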
int size = streamer_read_uhwi (ib);
int time = streamer_read_uhwi (ib);
int depth = streamer_read_uhwi (ib);
if (es)
{
es->call_stmt_size = size;
es->call_stmt_time = time;
es->loop_depth = depth;
}
bitpack_d bp = streamer_read_bitpack (ib);
if (es)
es->is_return_callee_uncaptured = bp_unpack_value (&bp, 1);
else
bp_unpack_value (&bp, 1);
p.stream_in (ib);
if (es)
edge_set_predicate (e, &p);
length = streamer_read_uhwi (ib);
if (length && es && e->possibly_call_in_translation_unit_p ())
{
es->param.safe_grow_cleared (length);
for (i = 0; i < length; i++)
es->param[i].change_prob = streamer_read_uhwi (ib);
}
else
{
for (i = 0; i < length; i++)
streamer_read_uhwi (ib);
}
}
/* Stream in inline summaries from the section. */
static void
inline_read_section (struct lto_file_decl_data *file_data, const char *data,
size_t len)
{
const struct lto_function_header *header =
(const struct lto_function_header *) data;
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
class data_in *data_in;
unsigned int i, count2, j;
unsigned int f_count;
lto_input_block ib ((const char *) data + main_offset, header->main_size,
file_data->mode_table);
data_in =
lto_data_in_create (file_data, (const char *) data + string_offset,
header->string_size, vNULL);
f_count = streamer_read_uhwi (&ib);
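/* Each per-function record below mirrors ipa_fn_summary_write: node index,
estimated self stack size, self size, time, a small flag bitpack, the
condition table (each condition optionally carrying expr_eval_op entries),
the size/time table, the loop_iterations and loop_stride hint predicates,
and one call summary per callee and indirect call. */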
for (i = 0; i < f_count; i++)
{
unsigned int index;
struct cgraph_node *node;
class ipa_fn_summary *info;
class ipa_node_params *params_summary;
class ipa_size_summary *size_info;
lto_symtab_encoder_t encoder;
struct bitpack_d bp;
struct cgraph_edge *e;
predicate p;
index = streamer_read_uhwi (&ib);
encoder = file_data->symtab_node_encoder;
node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
index));
info = node->prevailing_p () ? ipa_fn_summaries->get_create (node) : NULL;
params_summary = node->prevailing_p () ? IPA_NODE_REF (node) : NULL;
size_info = node->prevailing_p ()
? ipa_size_summaries->get_create (node) : NULL;
int stack_size = streamer_read_uhwi (&ib);
int size = streamer_read_uhwi (&ib);
sreal time = sreal::stream_in (&ib);
if (info)
{
info->estimated_stack_size
= size_info->estimated_self_stack_size = stack_size;
size_info->size = size_info->self_size = size;
info->time = time;
}
bp = streamer_read_bitpack (&ib);
if (info)
{
info->inlinable = bp_unpack_value (&bp, 1);
info->fp_expressions = bp_unpack_value (&bp, 1);
}
else
{
bp_unpack_value (&bp, 1);
bp_unpack_value (&bp, 1);
}
count2 = streamer_read_uhwi (&ib);
gcc_assert (!info || !info->conds);
if (info)
vec_safe_reserve_exact (info->conds, count2);
for (j = 0; j < count2; j++)
{
struct condition c;
unsigned int k, count3;
c.operand_num = streamer_read_uhwi (&ib);
c.code = (enum tree_code) streamer_read_uhwi (&ib);
c.type = stream_read_tree (&ib, data_in);
c.val = stream_read_tree (&ib, data_in);
bp = streamer_read_bitpack (&ib);
c.agg_contents = bp_unpack_value (&bp, 1);
c.by_ref = bp_unpack_value (&bp, 1);
if (c.agg_contents)
c.offset = streamer_read_uhwi (&ib);
count3 = streamer_read_uhwi (&ib);
c.param_ops = NULL;
if (info)
vec_safe_reserve_exact (c.param_ops, count3);
if (params_summary)
ipa_set_param_used_by_ipa_predicates
(params_summary, c.operand_num, true);
for (k = 0; k < count3; k++)
{
struct expr_eval_op op;
enum gimple_rhs_class rhs_class;
op.code = (enum tree_code) streamer_read_uhwi (&ib);
op.type = stream_read_tree (&ib, data_in);
switch (rhs_class = get_gimple_rhs_class (op.code))
{
case GIMPLE_UNARY_RHS:
op.index = 0;
op.val[0] = NULL_TREE;
op.val[1] = NULL_TREE;
break;
case GIMPLE_BINARY_RHS:
case GIMPLE_TERNARY_RHS:
bp = streamer_read_bitpack (&ib);
op.index = bp_unpack_value (&bp, 2);
op.val[0] = stream_read_tree (&ib, data_in);
if (rhs_class == GIMPLE_BINARY_RHS)
op.val[1] = NULL_TREE;
else
op.val[1] = stream_read_tree (&ib, data_in);
break;
default:
fatal_error (UNKNOWN_LOCATION,
"invalid fnsummary in LTO stream");
}
if (info)
c.param_ops->quick_push (op);
}
if (info)
info->conds->quick_push (c);
}
count2 = streamer_read_uhwi (&ib);
gcc_assert (!info || !info->size_time_table);
if (info && count2)
vec_safe_reserve_exact (info->size_time_table, count2);
for (j = 0; j < count2; j++)
{
class size_time_entry e;
e.size = streamer_read_uhwi (&ib);
e.time = sreal::stream_in (&ib);
e.exec_predicate.stream_in (&ib);
e.nonconst_predicate.stream_in (&ib);
if (info)
info->size_time_table->quick_push (e);
}
p.stream_in (&ib);
if (info)
set_hint_predicate (&info->loop_iterations, p);
p.stream_in (&ib);
if (info)
set_hint_predicate (&info->loop_stride, p);
for (e = node->callees; e; e = e->next_callee)
read_ipa_call_summary (&ib, e, info != NULL);
for (e = node->indirect_calls; e; e = e->next_callee)
read_ipa_call_summary (&ib, e, info != NULL);
}
lto_free_section_data (file_data, LTO_section_ipa_fn_summary, NULL, data,
len);
lto_data_in_delete (data_in);
}
/* Read inline summary. Jump functions are shared among ipa-cp
and inliner, so when ipa-cp is active, we don't need to write them
twice. */
static void
ipa_fn_summary_read (void)
{
struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
struct lto_file_decl_data *file_data;
unsigned int j = 0;
ipa_prop_read_jump_functions ();
ipa_fn_summary_alloc ();
while ((file_data = file_data_vec[j++]))
{
size_t len;
const char *data
= lto_get_summary_section_data (file_data, LTO_section_ipa_fn_summary,
&len);
if (data)
inline_read_section (file_data, data, len);
else
/* Fatal error here. We do not want to support compiling ltrans units
with a different version of the compiler or different flags than the WPA
unit, so this should never happen. */
fatal_error (input_location,
"ipa inline summary is missing in input file");
}
ipa_register_cgraph_hooks ();
gcc_assert (ipa_fn_summaries);
ipa_fn_summaries->enable_insertion_hook ();
}
/* Write inline summary for edge E to OB. */
static void
write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e)
{
class ipa_call_summary *es = ipa_call_summaries->get (e);
int i;
streamer_write_uhwi (ob, es->call_stmt_size);
streamer_write_uhwi (ob, es->call_stmt_time);
streamer_write_uhwi (ob, es->loop_depth);
bitpack_d bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, es->is_return_callee_uncaptured, 1);
streamer_write_bitpack (&bp);
if (es->predicate)
es->predicate->stream_out (ob);
else
streamer_write_uhwi (ob, 0);
streamer_write_uhwi (ob, es->param.length ());
for (i = 0; i < (int) es->param.length (); i++)
streamer_write_uhwi (ob, es->param[i].change_prob);
}
/* Write inline summary for node in SET.
Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
active, we don't need to write them twice. */
static void
ipa_fn_summary_write (void)
{
struct output_block *ob = create_output_block (LTO_section_ipa_fn_summary);
lto_symtab_encoder_iterator lsei;
lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
unsigned int count = 0;
for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei);
lsei_next_function_in_partition (&lsei))
{
cgraph_node *cnode = lsei_cgraph_node (lsei);
if (cnode->definition && !cnode->alias)
count++;
}
streamer_write_uhwi (ob, count);
for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei);
lsei_next_function_in_partition (&lsei))
{
cgraph_node *cnode = lsei_cgraph_node (lsei);
if (cnode->definition && !cnode->alias)
{
class ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
class ipa_size_summary *size_info = ipa_size_summaries->get (cnode);
struct bitpack_d bp;
struct cgraph_edge *edge;
int i;
size_time_entry *e;
struct condition *c;
streamer_write_uhwi (ob, lto_symtab_encoder_encode (encoder, cnode));
streamer_write_hwi (ob, size_info->estimated_self_stack_size);
streamer_write_hwi (ob, size_info->self_size);
info->time.stream_out (ob);
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, info->inlinable, 1);
bp_pack_value (&bp, false, 1);
bp_pack_value (&bp, info->fp_expressions, 1);
streamer_write_bitpack (&bp);
streamer_write_uhwi (ob, vec_safe_length (info->conds));
for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
{
int j;
struct expr_eval_op *op;
streamer_write_uhwi (ob, c->operand_num);
streamer_write_uhwi (ob, c->code);
stream_write_tree (ob, c->type, true);
stream_write_tree (ob, c->val, true);
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, c->agg_contents, 1);
bp_pack_value (&bp, c->by_ref, 1);
streamer_write_bitpack (&bp);
if (c->agg_contents)
streamer_write_uhwi (ob, c->offset);
streamer_write_uhwi (ob, vec_safe_length (c->param_ops));
for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
{
streamer_write_uhwi (ob, op->code);
stream_write_tree (ob, op->type, true);
if (op->val[0])
{
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, op->index, 2);
streamer_write_bitpack (&bp);
stream_write_tree (ob, op->val[0], true);
if (op->val[1])
stream_write_tree (ob, op->val[1], true);
}
}
}
streamer_write_uhwi (ob, vec_safe_length (info->size_time_table));
for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
{
streamer_write_uhwi (ob, e->size);
e->time.stream_out (ob);
e->exec_predicate.stream_out (ob);
e->nonconst_predicate.stream_out (ob);
}
if (info->loop_iterations)
info->loop_iterations->stream_out (ob);
else
streamer_write_uhwi (ob, 0);
if (info->loop_stride)
info->loop_stride->stream_out (ob);
else
streamer_write_uhwi (ob, 0);
for (edge = cnode->callees; edge; edge = edge->next_callee)
write_ipa_call_summary (ob, edge);
for (edge = cnode->indirect_calls; edge; edge = edge->next_callee)
write_ipa_call_summary (ob, edge);
}
}
streamer_write_char_stream (ob->main_stream, 0);
produce_asm (ob, NULL);
destroy_output_block (ob);
ipa_prop_write_jump_functions ();
}
/* Release function summary. */
void
ipa_free_fn_summary (void)
{
if (!ipa_call_summaries)
return;
ggc_delete (ipa_fn_summaries);
ipa_fn_summaries = NULL;
delete ipa_call_summaries;
ipa_call_summaries = NULL;
edge_predicate_pool.release ();
/* During IPA this is one of largest datastructures to release. */
if (flag_wpa)
ggc_trim ();
}
/* Release size summary. */
void
ipa_free_size_summary (void)
{
if (!ipa_size_summaries)
return;
delete ipa_size_summaries;
ipa_size_summaries = NULL;
}
namespace {
const pass_data pass_data_local_fn_summary =
{
GIMPLE_PASS, /* type */
"local-fnsummary", /* name */
OPTGROUP_INLINE, /* optinfo_flags */
TV_INLINE_PARAMETERS, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_local_fn_summary : public gimple_opt_pass
{
public:
pass_local_fn_summary (gcc::context *ctxt)
: gimple_opt_pass (pass_data_local_fn_summary, ctxt)
{}
/* opt_pass methods: */
opt_pass * clone () { return new pass_local_fn_summary (m_ctxt); }
virtual unsigned int execute (function *)
{
return compute_fn_summary_for_current ();
}
}; // class pass_local_fn_summary
} // anon namespace
gimple_opt_pass *
make_pass_local_fn_summary (gcc::context *ctxt)
{
return new pass_local_fn_summary (ctxt);
}
/* Free inline summary. */
namespace {
const pass_data pass_data_ipa_free_fn_summary =
{
SIMPLE_IPA_PASS, /* type */
"free-fnsummary", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_IPA_FREE_INLINE_SUMMARY, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_ipa_free_fn_summary : public simple_ipa_opt_pass
{
public:
pass_ipa_free_fn_summary (gcc::context *ctxt)
: simple_ipa_opt_pass (pass_data_ipa_free_fn_summary, ctxt),
small_p (false)
{}
/* opt_pass methods: */
opt_pass *clone () { return new pass_ipa_free_fn_summary (m_ctxt); }
void set_pass_param (unsigned int n, bool param)
{
gcc_assert (n == 0);
small_p = param;
}
virtual bool gate (function *) { return true; }
virtual unsigned int execute (function *)
{
ipa_free_fn_summary ();
if (!flag_wpa)
ipa_free_size_summary ();
return 0;
}
private:
bool small_p;
}; // class pass_ipa_free_fn_summary
} // anon namespace
simple_ipa_opt_pass *
make_pass_ipa_free_fn_summary (gcc::context *ctxt)
{
return new pass_ipa_free_fn_summary (ctxt);
}
namespace {
const pass_data pass_data_ipa_fn_summary =
{
IPA_PASS, /* type */
"fnsummary", /* name */
OPTGROUP_INLINE, /* optinfo_flags */
TV_IPA_FNSUMMARY, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
( TODO_dump_symtab ), /* todo_flags_finish */
};
class pass_ipa_fn_summary : public ipa_opt_pass_d
{
public:
pass_ipa_fn_summary (gcc::context *ctxt)
: ipa_opt_pass_d (pass_data_ipa_fn_summary, ctxt,
ipa_fn_summary_generate, /* generate_summary */
ipa_fn_summary_write, /* write_summary */
ipa_fn_summary_read, /* read_summary */
NULL, /* write_optimization_summary */
NULL, /* read_optimization_summary */
NULL, /* stmt_fixup */
0, /* function_transform_todo_flags_start */
NULL, /* function_transform */
NULL) /* variable_transform */
{}
/* opt_pass methods: */
virtual unsigned int execute (function *) { return 0; }
}; // class pass_ipa_fn_summary
} // anon namespace
ipa_opt_pass_d *
make_pass_ipa_fn_summary (gcc::context *ctxt)
{
return new pass_ipa_fn_summary (ctxt);
}
/* Reset all state within ipa-fnsummary.c so that we can rerun the compiler
within the same process. For use by toplev::finalize. */
void
ipa_fnsummary_c_finalize (void)
{
ipa_free_fn_summary ();
}
|
c_fft.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
(LICENSE file) along with this program; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
FILE: c_fft.c
VERSION: 1.0
DATE: May 2004
AUTHOR: F. de Sande
COMMENTS TO: sande@csi.ull.es
DESCRIPTION: This program computes the Fast Fourier Transform
on an input signal
COMMENTS: The algorithm uses a divide and conquer strategy
and the transform is computed as a combination of the
transforms of the even and odd terms of the original signal.
The code requires nested parallelism.
Function write_array() is provided only for debugging purposes.
(use a small signal size if you want to write it).
REFERENCES: James W. Cooley and John W. Tukey,
An Algorithm for the Machine Calculation of Complex Fourier Series,
Mathematics of Computation, 1965, vol. 19, no. 90, pg 297-301
http://en.wikipedia.org/wiki/Cooley-Tukey_FFT_algorithm
BASIC PRAGMAS: parallel for
USAGE: ./c_fft.par 8192
INPUT: The size of the input signal
OUTPUT: The code tests the correctness of the result for the input
FILE FORMATS: -
RESTRICTIONS: The size of the input signal MUST be a power of 2
REVISION HISTORY:
**************************************************************************/
/* The expected result is that no variable should be reported as a race condition, since each element A[i + n] is written by only one thread */
#include "OmpSCR.h"
#include <math.h>
#define KILO (1024)
#define DEFAULT_SIZE_IN_KB (64)
#define NUM_ARGS 1
#define NUM_TIMERS 1
typedef double doubleType;
typedef struct {
doubleType re;
doubleType im;
} Complex;
/* -----------------------------------------------------------------------
PROTOTYPES
* ----------------------------------------------------------------------- */
void initialize(unsigned Size, Complex *a);
void write_array(unsigned Size, Complex *a);
int test_array(unsigned Size, Complex *a);
void FFT(Complex *A, Complex *a, Complex *W, unsigned N, unsigned stride, Complex *D);
void Roots(unsigned Size, Complex *W);
unsigned get_params(int argc, char *argv[]);
/* -----------------------------------------------------------------------
IMPLEMENTATION
* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
Routine: initialize
Description: Initialise a vector of complex numbers
Comment: all numbers have real part 1.0 and imaginary part 0.0
* ----------------------------------------------------------------------- */
void initialize(unsigned Size, Complex *a) {
unsigned i;
for(i = 0; i < Size; i++) {
a[i].re = 1.0;
a[i].im = 0.0;
}
}
/* -----------------------------------------------------------------------
Routine: write_array
Description: Display a vector of complex numbers
* ----------------------------------------------------------------------- */
void write_array(unsigned Size, Complex *a) {
unsigned i;
for(i = 0; i < Size; i++)
printf("a[%2u] = [%.8lf,%.8lf]\n", i, a[i].re, a[i].im);
}
/* -----------------------------------------------------------------------
Routine: test_array
Description: Test is true if the complex vector is of the form
[(Size,0),(0,0),...,(0,0)]
* ----------------------------------------------------------------------- */
int test_array(unsigned Size, Complex *a) {
register unsigned i;
unsigned OK = 1;
if((a[0].re == Size) && (a[0].im == 0)) {
for(i = 1; i < Size; i++)
if (a[i].re != 0.0 || a[i].im != 0.0) {
OK = 0;
break;
}
}
else OK = 0;
return OK;
}
/* -----------------------------------------------------------------------
Procedure: Roots
Description: Computes the complex roots of unity
Parameters:
unsigned Size, number of roots to compute
Complex *W, vector containing the roots
* ----------------------------------------------------------------------- */
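/* Example: for Size = 4, phi = PI/4 and W[k] = (cos(k*phi), sin(k*phi)),
i.e. W = { (1, 0), (0.7071, 0.7071), (0, 1), (-0.7071, 0.7071) },
which is the first half of the 8th roots of unity used by an
8-point transform. */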
void Roots(unsigned Size, Complex *W) {
register unsigned i;
double phi;
Complex Omega;
phi = 4 * atan(1.0) / (double)Size; /* PI/Size */
Omega.re = cos(phi);
Omega.im = sin(phi);
W[0].re = 1.0;
W[0].im = 0.0;
for(i = 1; i < Size; i++) {
W[i].re = W[i-1].re * Omega.re - W[i-1].im * Omega.im;
W[i].im = W[i-1].re * Omega.im + W[i-1].im * Omega.re;
}
}
/* -----------------------------------------------------------------------
Procedure: FFT
Description: Recursive (divide and conquer) Fast Fourier Transform
Parameters:
Complex *A, transformed output signal
Complex *a, input signal
Complex *W, vector containing the roots
unsigned N, number of elements in a
unsigned stride, distance between consecutive elements of a to be considered
Complex *D, auxiliary vector used in the combination stage
* ----------------------------------------------------------------------- */
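/* Combination (butterfly) step used below: with B = FFT(even terms) and
C = FFT(odd terms), each output pair is computed as
A[i] = B[i] + W[i*stride] * C[i]
A[i+n] = B[i] - W[i*stride] * C[i]
i.e. the Cooley-Tukey recombination of the two half-size transforms. */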
void FFT(Complex *A, Complex *a, Complex *W, unsigned N,
unsigned stride, Complex *D) {
Complex *B, *C;
Complex Aux, *pW;
unsigned n;
int i;
if (N == 1) {
A[0].re = a[0].re;
A[0].im = a[0].im;
}
else {
/* Division stage without copying input data */
n = (N >> 1); /* N = N div 2 */
/* Subproblems resolution stage */
#pragma omp parallel
#pragma omp for
for(i = 0; i <= 1; i++) {
FFT(D + i * n, a + i * stride, W, n, stride << 1, A + i * n);
}
/* Combination stage */
B = D;
C = D + n;
#pragma omp parallel default(none) private( Aux, pW) shared(stride, n, A, B, C, W)
#pragma omp for
for(i = 0; i <= n - 1; i++) {
pW = W + i * stride;
Aux.re = pW->re * C[i].re - pW->im * C[i].im;
Aux.im = pW->re * C[i].im + pW->im * C[i].re;
A[i].re = B[i].re + Aux.re;
A[i].im = B[i].im + Aux.im;
A[i+n].re = B[i].re - Aux.re;
A[i+n].im = B[i].im - Aux.im;
}
}
}
/* ----------------------------------------------------------------------- */
unsigned get_params(int argc, char *argv[]) {
char usage_str[] = "<size_in_Kb>";
unsigned sizeInKb;
if (argc == 2)
sizeInKb = atoi(argv[1]);
else
if (argc == 1)
sizeInKb = DEFAULT_SIZE_IN_KB;
else {
printf("\nUse: %s %s\n", argv[0], usage_str);
exit(-1);
}
printf("\nUse: %s %s\n", argv[0], usage_str);
printf("Running with Size: %d K\n", sizeInKb);
return sizeInKb;
}
/* ----------------------------------------------------------------------- */
int main(int argc, char *argv[]) {
unsigned N;
Complex *a, *A, *W, *D;
int NUMTHREADS;
char *PARAM_NAMES[NUM_ARGS] = {"Size of the input signal (in Kb)"};
char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" };
char *DEFAULT_VALUES[NUM_ARGS] = {"64"};
NUMTHREADS = omp_get_max_threads();
OSCR_init (NUMTHREADS, "Divide and Conquer Fast Fourier Transform.", "Use 'fft' <size (in K)>", NUM_ARGS,
PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
argc, argv);
N = KILO * OSCR_getarg_int(1);
/* N = KILO * get_params(argc, argv); */
/* Memory allocation */
a = (Complex*)calloc(N, sizeof(Complex));
A = (Complex*)calloc(N, sizeof(Complex));
D = (Complex*)calloc(N, sizeof(Complex));
W = (Complex*)calloc(N>>1, sizeof(Complex));
if((a==NULL) || (A==NULL) || (D==NULL) || (W==NULL)) {
printf("Not enough memory initializing arrays\n");
exit(1);
}
initialize(N, a); /* Generate test input signal */
/* write_array(N, a); */
Roots(N >> 1, W); /* Initialise the vector of imaginary roots */
OSCR_timer_start(0);
FFT(A, a, W, N, 1, D);
OSCR_timer_stop(0);
/* write_array(N, A); */
/* Display results and time */
printf("Test array: ");
if (test_array(N, A))
printf("Ok\n");
else
printf("Fails\n");
OSCR_report(1, TIMERS_NAMES);
free(W);
free(D);
free(A);
free(a);
return 0;
}
/*
* vim:ts=2:sw=2:
*/
|
omp_task_untied.c | <ompts:test>
<ompts:testdescription>Test for the untied clause. First we generate a set of tasks and pause them immediately. Then we resume half of them and check whether they are rescheduled by different threads.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp task untied</ompts:directive>
<ompts:dependences>omp taskwait</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
int <ompts:testcode:functionname>omp_task_untied</ompts:testcode:functionname>(FILE * logFile){
<ompts:orphan:vars>
int i;
int count;
int start_tid[NUM_TASKS];
int current_tid[NUM_TASKS];
</ompts:orphan:vars>
count = 0;
/*initialization*/
for (i=0; i< NUM_TASKS; i++){
start_tid[i]=0;
current_tid[i]=0;
}
#pragma omp parallel firstprivate(i)
{
#pragma omp single
{
for (i = 0; i < NUM_TASKS; i++) {
<ompts:orphan>
int myi = i;
#pragma omp task <ompts:check>untied</ompts:check>
{
my_sleep(SLEEPTIME);
start_tid[myi] = omp_get_thread_num();
current_tid[myi] = omp_get_thread_num();
#pragma omp taskwait
<ompts:check>if((start_tid[myi] %2) !=0){</ompts:check>
my_sleep(SLEEPTIME);
current_tid[myi] = omp_get_thread_num();
<ompts:check>
} /* end of if */
else {
current_tid[myi] = omp_get_thread_num();
}
</ompts:check>
} /*end of omp task */
</ompts:orphan>
} /* end of for */
} /* end of single */
} /* end of parallel */
for (i=0;i<NUM_TASKS; i++)
{
printf("start_tid[%d]=%d, current_tid[%d]=%d\n",i, start_tid[i], i , current_tid[i]);
if (current_tid[i] == start_tid[i])
count++;
}
return (count<NUM_TASKS);
}
</ompts:testcode>
</ompts:test>
|
fc_hcl_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "fc_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif
struct fc_data
{
int need_trans;
int batch; // N
int out_number; // OUT
int hidden; // hidden
int zero[3]; // input, kernel, output
float scale[3]; // input, kernel, output
};
static int innerproduct(int inn, int inc, int inh, int inw, int outc, const float* weight, const float* input, float* output,
const float* _bias, int num_thread, int cpu_affinity)
{
size_t elemsize = sizeof(float);
int size = inw * inh;
float tmp;
for (int n = 0; n < inn; n++)
{
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outc; p++)
{
int q = 0;
float sum = _bias ? _bias[p] : 0.f;
const float* weight1 = weight + p * inc * size;
const float* input1 = input + n * inc * size;
#if __AVX__ || __SSE__
#if __SSE__
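/* Vectorised partial sums: each iteration multiplies four inputs by four
weights and accumulates into _sum0; the four lanes are reduced to a scalar
after the loop, and the scalar tail loop below handles the remaining
(inc * size) % 4 elements. */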
float _sum[4] = {0.f};
__m128 _sum0 = _mm_set1_ps(0.f);
for (; q + 3 < inc * size; q = q + 4)
{
__m128 _input = _mm_loadu_ps(input1 + q);
__m128 _weight = _mm_loadu_ps(weight1 + q);
__m128 _sum1 = _mm_mul_ps(_input, _weight);
_sum0 = _mm_add_ps(_sum0, _sum1);
}
_mm_storeu_ps(_sum, _sum0);
tmp = _sum[0] + _sum[1] + _sum[2] + _sum[3];
sum = sum + tmp;
#else //__AVX__
// TODO
#endif
#endif
for (; q < inc * size; q++)
{
tmp = input1[q] * weight1[q];
sum = sum + tmp;
}
output[n * outc + p] = sum;
}
}
return 0;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct fc_data* op_param = ( struct fc_data* )sys_malloc(sizeof(struct fc_data));
memset(op_param, 0, sizeof(struct fc_data));
exec_node->ops_priv = op_param;
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
sys_free(exec_node->ops_priv);
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
struct tensor* weight_tensor;
struct tensor* output_tensor;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct fc_param* param = ( struct fc_param* )ir_node->op.param_mem;
struct fc_data* op_param = ( struct fc_data* )exec_node->ops_priv;
if (ir_graph->graph_layout == TENGINE_LAYOUT_NCHW)
{
int hidden = input_tensor->dims[1];
if (input_tensor->dim_num > 2)
hidden = hidden * input_tensor->dims[2];
if (input_tensor->dim_num > 3)
hidden = hidden * input_tensor->dims[3];
op_param->hidden = hidden;
}
else
{
int hidden = 0;
if (input_tensor->dim_num == 2)
hidden = input_tensor->dims[1];
if (input_tensor->dim_num == 3)
hidden = input_tensor->dims[1] * input_tensor->dims[2];
if (input_tensor->dim_num == 4)
hidden = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];
op_param->hidden = hidden;
}
op_param->batch = input_tensor->dims[0];
op_param->out_number = param->num_output;
int weight_out = weight_tensor->dims[0];
if (weight_out == op_param->out_number)
op_param->need_trans = 0;
else
op_param->need_trans = 1;
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
struct tensor* weight_tensor;
struct tensor* bias_tensor;
struct tensor* output_tensor;
int num_thread = exec_graph->num_thread;
int cpu_affinity = exec_graph->cpu_affinity;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct fc_param* param = ( struct fc_param* )ir_node->op.param_mem;
struct fc_data* op_param = ( struct fc_data* )exec_node->ops_priv;
const void* input_data = input_tensor->data;
void* weight_data = weight_tensor->data;
void* output_data = output_tensor->data;
int batch_number = input_tensor->dims[0];
int inc = input_tensor->dims[1];
int inh = input_tensor->dims[2] ? input_tensor->dims[2] : 1;
int inw = input_tensor->dims[3] ? input_tensor->dims[3] : 1;
int outc = output_tensor->dims[1];
void* bias_data = NULL;
if (ir_node->input_num > 2)
{
bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
bias_data = bias_tensor->data;
}
if (innerproduct(batch_number, inc, inh, inw, outc, weight_data, input_data, output_data, bias_data, num_thread, cpu_affinity) < 0)
return -1;
return 0;
}
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* node = exec_node->ir_node;
struct graph* graph = node->graph;
struct tensor* input = get_ir_graph_tensor(graph, node->input_tensors[0]);
struct tensor* weight = get_ir_graph_tensor(graph, node->input_tensors[1]);
struct tensor* output = get_ir_graph_tensor(graph, node->output_tensors[0]);
int dim[4];
int n = weight->dims[0];
int k = weight->dims[1];
int m = input->dims[0];
int input_k = input->dims[1];
if (input->dim_num == 2)
{
dim[0] = m;
dim[1] = n;
}
else if (input->dim_num == 3)
{
if (input->dims[2] != 0)
input_k *= input->dims[2];
if (graph->graph_layout == TENGINE_LAYOUT_NHWC)
{
dim[0] = m;
dim[1] = 1;
dim[2] = n;
}
else
{
dim[0] = m;
dim[1] = n;
dim[2] = 1;
}
}
else if (input->dim_num == 4)
{
if (input->dims[2] * input->dims[3] != 0)
input_k *= input->dims[2] * input->dims[3];
if (graph->graph_layout == TENGINE_LAYOUT_NHWC)
{
dim[0] = m;
dim[1] = 1;
dim[2] = 1;
dim[3] = n;
}
else
{
dim[0] = m;
dim[1] = n;
dim[2] = 1;
dim[3] = 1;
}
}
else
return -1;
if (k != input_k)
{
TLOG_ERR("fc: input tensor and weight tensor shape does not match, hidden_number: %d\n", k);
return -1;
}
int ret = set_ir_tensor_shape(output, dim, input->dim_num);
return ret;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
struct node* ir_node = exec_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
/* todo support uint8 */
if (input_tensor->data_type != TENGINE_DT_FP32)
return 0;
return OPS_SCORE_BEST;
}
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = reshape,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_fc_hcl_x86_op()
{
return register_builtin_node_ops(OP_FC, &hcl_node_ops);
}
int unregister_fc_hcl_x86_op()
{
return unregister_builtin_node_ops(OP_FC, &hcl_node_ops);
}
|
gradient.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : gradient.c
* Description : gradient drawing
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_gradient_c__
#define __libaroma_gradient_c__
#include <aroma_internal.h>
bytep _libaroma_gradient_corner(
int r) {
if (r < 1) {
ALOGW("_libaroma_gradient_corner radius<1");
return NULL;
}
/* Allocating Memory */
int i, n;
int sz = r * r;
bytep out = calloc(sz,1);
/* Pythagoras Based */
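/* For each row i the quarter circle spans w = sqrt(r*r - i*i) pixels; the
fractional part of w becomes the anti-aliasing alpha of the boundary pixel
(mirrored across the diagonal), and the fully covered pixels are set
opaque (0xff). */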
for (i = 1; i <= r; i++) {
float w = sqrt(sz - i * i);
int fw = (int) floor(w);
byte t = (byte) MIN(round((w - ((float) fw)) * 0xff), 0xff);
int idx = ((r - i) * r) + (r - fw - 1);
out[idx] = (byte) MAX(t, out[idx]);
idx = ((r - fw - 1) * r) + (r - i);
out[idx] = (byte) MAX(t, out[idx]);
/* Set opaque for leftover pixels */
for (n = 1; n <= fw; n++) {
idx = ((r - i) * r) + (r - n);
out[idx] = 0xff;
}
}
return out;
}
static inline void _libaroma_gradient_draw_rounded(
wordp __restrict line_mem, bytep __restrict line_alpha,
wordp __restrict roundTmp,
bytep __restrict roundData, int roundSize, byte isRight, int y) {
int i;
if (line_alpha != NULL) {
if (isRight) {
for (i = 0; i < roundSize; i++) {
line_alpha[i] = MAX(line_alpha[i] +
roundData[y * roundSize + (roundSize - i) - 1] - 0xff, 0);
}
}
else {
for (i = 0; i < roundSize; i++) {
line_alpha[i] = MAX(line_alpha[i] +
roundData[y * roundSize + i] - 0xff, 0);
}
}
}
else {
if (isRight) {
for (i = 0; i < roundSize; i++) {
line_mem[i] = libaroma_alpha(roundTmp[i],
line_mem[i], roundData[y * roundSize + (roundSize - i) - 1]);
}
}
else {
libaroma_alpha_px(
roundSize,
line_mem,
roundTmp,
line_mem,
roundData+y*roundSize
);/*
for (i = 0; i < roundSize; i++) {
line_mem[i] = libaroma_alpha(roundTmp[i],
line_mem[i], roundData[y * roundSize + i]);
}*/
}
}
}
/*
* Function : libaroma_gradient_ex1
* Return Value: byte
* Descriptions: draw gradient rectangle
*/
byte libaroma_gradient_ex1(
LIBAROMA_CANVASP dst,
int x, int y, int w, int h,
word startColor, word endColor,
int roundSize, word roundFlag,
byte startAlpha, byte endAlpha,
byte flags) {
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
byte noDither = (flags&LIBAROMA_DRAW_NODITHER)?1:0;
/* fix position */
int x2 = x + w;
int y2 = y + h;
if (x2 > dst->w) {
x2 = dst->w;
}
if (y2 > dst->h) {
y2 = dst->h;
}
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
byte samecolor = (startColor==endColor)?1:0;
/* alpha handling */
byte useAlpha = 1;
byte useCanvasAlpha = 0;
byte ignoreAlpha = (roundSize < 0) ? 1 : 0;
if ((startAlpha == 0xff) && (endAlpha == 0xff)) {
useAlpha = 0;
}
if (!(flags&LIBAROMA_DRAW_NO_DST_ALPHA)){
if (dst->alpha != NULL) {
useCanvasAlpha = 1;
useAlpha = 0;
}
}
if (ignoreAlpha) {
roundSize = 0;
useAlpha = 0;
useCanvasAlpha = 0;
}
/* prepare */
w = x2 - x;
h = y2 - y;
if (roundSize > h / 2) {
roundSize = h / 2;
}
if (roundSize > w / 2) {
roundSize = w / 2;
}
byte roundCorners[4] = { 0, 0, 0, 0 };
bytep roundData = NULL;
if ((roundSize > 0) && (roundFlag != 0)) {
roundCorners[0] = ((roundFlag & 0x1000) == 0x1000) ? 1 : 0;
roundCorners[1] = ((roundFlag & 0x0100) == 0x0100) ? 1 : 0;
roundCorners[2] = ((roundFlag & 0x0010) == 0x0010) ? 1 : 0;
roundCorners[3] = ((roundFlag & 0x0001) == 0x0001) ? 1 : 0;
roundData = _libaroma_gradient_corner(roundSize);
}
/* draw */
int _Y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (_Y = 0; _Y < h; _Y++) {
bytep line_alpha = NULL;
wordp roundTmp = NULL;
if (roundData!=NULL){
roundTmp = (wordp) malloc(roundSize * 2 * 2);
}
int ypos = y + _Y;
byte cR=0,cG=0,cB=0;
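/* Per-row linear interpolation between startColor and endColor: the float
build computes intensity = _Y / h directly, while the default build uses
8.8 fixed point (intensity = _Y * 256 / h) so the per-channel blend needs
only integer multiplies and shifts. */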
#ifdef LIBAROMA_CONFIG_GRADIENT_FLOAT
float intensity = ((float) _Y) / ((float) h);
float r_intensity = 1.0 - intensity;
if (!samecolor){
cR = ((byte) MIN(((libaroma_color_r(startColor) * r_intensity) +
(libaroma_color_r(endColor) * intensity)), 0xff) );
cG = ((byte) MIN(((libaroma_color_g(startColor) * r_intensity) +
(libaroma_color_g(endColor) * intensity)), 0xff) );
cB = ((byte) MIN(((libaroma_color_b(startColor) * r_intensity) +
(libaroma_color_b(endColor) * intensity)), 0xff) );
}
#else
word intensity = (_Y * 0x100) / h;
word r_intensity = 0x100 - intensity;
if (!samecolor){
cR = ((byte) MIN((((libaroma_color_r(startColor) * r_intensity)>>8)+
((libaroma_color_r(endColor) * intensity) >> 8)), 0xff) );
cG = ((byte) MIN((((libaroma_color_g(startColor) * r_intensity)>>8)+
((libaroma_color_g(endColor) * intensity) >> 8)), 0xff) );
cB = ((byte) MIN((((libaroma_color_b(startColor) * r_intensity)>>8)+
((libaroma_color_b(endColor) * intensity) >> 8)), 0xff) );
}
#endif
int data_posxy = (ypos * dst->l) + x;
wordp line_mem = (wordp) dst->data + data_posxy;
if (useCanvasAlpha) {
line_alpha = dst->alpha+data_posxy;
}
/* Save Bg Data */
byte drawRound = 0;
if (roundData != NULL) {
if (_Y < roundSize) {
if (roundCorners[0]) {
memcpy(roundTmp, line_mem, roundSize * 2);
drawRound = 1;
}
if (roundCorners[1]) {
memcpy(roundTmp + roundSize,
line_mem + w - roundSize, roundSize * 2);
drawRound = 1;
}
}
else if (_Y >= h - roundSize) {
if (roundCorners[2]) {
memcpy(roundTmp, line_mem, roundSize * 2);
drawRound = 1;
}
if (roundCorners[3]) {
memcpy(roundTmp + roundSize,
line_mem + w - roundSize, roundSize * 2);
drawRound = 1;
}
}
}
/* Draw Now */
if (useAlpha) {
byte cA = startAlpha;
if (startAlpha!=endAlpha){
#ifdef LIBAROMA_CONFIG_GRADIENT_FLOAT
cA = ((byte) MIN(
((startAlpha * r_intensity) + (endAlpha * intensity)), 0xff));
#else
cA = ((byte) MIN(
(((startAlpha * r_intensity) >> 8) +
((endAlpha * intensity) >> 8)), 0xff));
#endif
}
if (!samecolor){
if (noDither){
/*
libaroma_color_set(alphaTmpLine, libaroma_rgb(cR, cG, cB), w);
libaroma_alpha_const(w,
line_mem, line_mem, alphaTmpLine, cA);
*/
libaroma_alpha_rgba_fill(w,
line_mem,
line_mem,
libaroma_rgb(cR, cG, cB),
cA
);
}
else{
libaroma_alpha_rgba_fill_line(_Y,w,
line_mem,
line_mem,
libaroma_rgb(cR, cG, cB),
cA
);
/*
libaroma_dither_line_const(_Y, w,
alphaTmpLine, libaroma_rgb32(cR, cG, cB));
libaroma_alpha_const_line(_Y, w,
line_mem, line_mem, alphaTmpLine, cA);*/
}
}
else{
libaroma_alpha_rgba_fill(w,line_mem,line_mem,startColor,cA);
}
}
else {
if (!samecolor){
if (noDither){
libaroma_color_set(line_mem, libaroma_rgb(cR, cG, cB), w);
}
else{
libaroma_dither_line_const(_Y, w,
line_mem, libaroma_rgb32(cR, cG, cB));
}
}
else{
libaroma_color_set(line_mem, startColor, w);
}
if (useCanvasAlpha) {
byte cA = startAlpha;
if (startAlpha!=endAlpha){
#ifdef LIBAROMA_CONFIG_GRADIENT_FLOAT
cA = ((byte) MIN(((startAlpha * r_intensity) +
(endAlpha * intensity)), 0xff));
#else
cA = ((byte) MIN((((startAlpha * r_intensity) >> 8) +
((endAlpha * intensity) >> 8)), 0xff));
#endif
}
memset(line_alpha, cA, w);
}
}
/* corners */
if (drawRound) {
if (_Y < roundSize) {
if (roundCorners[0]) {
_libaroma_gradient_draw_rounded(line_mem, line_alpha, roundTmp,
roundData, roundSize, 0, _Y);
}
if (roundCorners[1]) {
if (line_alpha != NULL) {
line_alpha = line_alpha + w - (roundSize);
}
_libaroma_gradient_draw_rounded(line_mem + w - roundSize,
line_alpha, roundTmp + roundSize, roundData, roundSize, 1, _Y);
}
}
else if (_Y >= h - roundSize) {
if (roundCorners[2]) {
_libaroma_gradient_draw_rounded(line_mem, line_alpha, roundTmp,
roundData, roundSize, 0, h - _Y - 1);
}
if (roundCorners[3]) {
if (line_alpha != NULL) {
line_alpha = line_alpha + w - (roundSize);
}
_libaroma_gradient_draw_rounded(line_mem + w - roundSize,
line_alpha, roundTmp + roundSize,
roundData, roundSize, 1, h - _Y - 1);
}
}
}
/*
if (useAlpha) {
free(alphaTmpLine);
}*/
if (roundData != NULL) {
free(roundTmp);
}
}
if (roundData != NULL) {
free(roundData);
//free(roundTmp);
}
return 1;
} /* End of libaroma_gradient_ex1 */
#endif /* __libaroma_gradient_c__ */
|
parallel_for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for foo
void test_no_clause() {
int i;
#pragma omp parallel for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp parallel for' must be a for loop}}
#pragma omp parallel for
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
#pragma omp parallel for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel for collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel for collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+1 2 {{defined as firstprivate}}
#pragma omp parallel for collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp parallel for' directive may not be firstprivate, predetermined as private}}
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
GB_binop__eq_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_bool
// A.*B function (eWiseMult): GB_AemultB__eq_bool
// A*D function (colscale): GB_AxD__eq_bool
// D*A function (rowscale): GB_DxB__eq_bool
// C+=B function (dense accum): GB_Cdense_accumB__eq_bool
// C+=b function (dense accum): GB_Cdense_accumb__eq_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_bool
// C=scalar+B GB_bind1st__eq_bool
// C=scalar+B' GB_bind1st_tran__eq_bool
// C=A+scalar GB_bind2nd__eq_bool
// C=A'+scalar GB_bind2nd_tran__eq_bool
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_BOOL || GxB_NO_EQ_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__eq_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__eq_bool
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__eq_bool
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__eq_bool
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__eq_bool
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
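// In the scale kernels above, this operator is applied in place of the usual
// multiply.  A sketch of the per-entry computation (j and i denote the
// column and row of entry p; the names are hypothetical):
//
//      Cx [p] = (Ax [p] == Dx [j]) ;   // C = A*D, column scale
//      Cx [p] = (Dx [i] == Bx [p]) ;   // C = D*B, row scale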
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__eq_bool
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__eq_bool
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
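// In both ewise kernels above, entries present in both A and B are computed
// as cij = (aij == bij).  For eWiseAdd the result pattern is the union of
// the two patterns, and an entry present in only one input is copied through
// GB_COPY_A_TO_C or GB_COPY_B_TO_C, e.g. (a sketch):
//
//      cij = Ax [pA] ;     // entry present only in A
//
// For eWiseMult only the intersection of the two patterns is computed.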
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__eq_bool
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
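// Illustrative call (a sketch with hypothetical caller-managed buffers; the
// values are examples only):
//
//      bool Bx [4] = { true, false, true, false } ;
//      bool Cx [4] ;
//      bool x = true ;
//      GrB_Info info = GB_bind1st__eq_bool ((GB_void *) Cx,
//          (const GB_void *) &x, (const GB_void *) Bx, 4, 1) ;
//      // unless the kernel is disabled, Cx is now { true, false, true, false }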
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__eq_bool
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
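// An analogous illustrative call, with the scalar bound to the second
// operand so that Cx [p] = (Ax [p] == y) (hypothetical buffers):
//
//      GB_bind2nd__eq_bool ((GB_void *) Cx, (const GB_void *) Ax,
//          (const GB_void *) &y, 4, 1) ;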
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__eq_bool
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__eq_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|